about summary refs log tree commit diff
path: root/lib/hadoop-0.20.0/contrib/hdfsproxy
diff options
context:
space:
mode:
Diffstat (limited to 'lib/hadoop-0.20.0/contrib/hdfsproxy')
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/README                          |  30
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy                   | 170
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh         |  67
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh         | 141
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh        |  34
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh         |  68
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh          |  37
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh           |  28
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml                       | 183
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl          |  24
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml      |  59
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh           |  44
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template  |  44
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts            |   1
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties           |  61
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml             |  26
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml       |  28
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar               | bin 0 -> 21572 bytes
18 files changed, 1045 insertions, 0 deletions
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/README b/lib/hadoop-0.20.0/contrib/hdfsproxy/README
new file mode 100644
index 0000000000..2c33988926
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/README
@@ -0,0 +1,30 @@
+HDFSPROXY is an HTTPS proxy server that exposes the same HSFTP interface as a
+real cluster. It authenticates users via user certificates and enforces access
+control based on configuration files.
+
+Starting up an HDFSPROXY server is similar to starting up an HDFS cluster.
+Simply run "hdfsproxy" shell command. The main configuration file is
+hdfsproxy-default.xml, which should be on the classpath. hdfsproxy-env.sh
+can be used to set up environmental variables. In particular, JAVA_HOME should
+be set. Additional configuration files include user-certs.xml,
+user-permissions.xml and ssl-server.xml, which are used to specify allowed user
+certs, allowed directories/files, and ssl keystore information for the proxy,
+respectively. The location of these files can be specified in
+hdfsproxy-default.xml. Environmental variable HDFSPROXY_CONF_DIR can be used to
+point to the directory where these configuration files are located. The
+configuration files of the proxied HDFS cluster should also be available on the
+classpath (hdfs-default.xml and hdfs-site.xml).
+
+Mirroring those used in HDFS, a few shell scripts are provided to start and
+stop a group of proxy servers. The hosts to run hdfsproxy on are specified in
+hdfsproxy-hosts file, one host per line. All hdfsproxy servers are stateless
+and run independently from each other. Simple load balancing can be set up by
+mapping all hdfsproxy server IP addresses to a single hostname. Users should
+use that hostname to access the proxy. If an IP address lookup for that
+hostname returns more than one IP address, an HFTP/HSFTP client will randomly
+pick one to use.
+
+Command "hdfsproxy -reloadPermFiles" can be used to trigger reloading of
+user-certs.xml and user-permissions.xml files on all proxy servers listed in
+the hdfsproxy-hosts file. Similarly, "hdfsproxy -clearUgiCache" command can be
+used to clear the UGI caches on all proxy servers.
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy
new file mode 100755
index 0000000000..1b1e597891
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy
@@ -0,0 +1,170 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The HdfsProxy command script
+#
+# Environment Variables
+#
+#   JAVA_HOME        The java implementation to use. Required.
+#
+# HDFSPROXY_CLASSPATH Extra Java CLASSPATH entries.
+#
+# HDFSPROXY_HEAPSIZE The maximum amount of heap to use, in MB.
+# Default is 1000.
+#
+# HDFSPROXY_OPTS Extra Java runtime options.
+#
+# HDFSPROXY_NAMENODE_OPTS These options are added to HDFSPROXY_OPTS
+# HDFSPROXY_CLIENT_OPTS when the respective command is run.
+# HDFSPROXY_{COMMAND}_OPTS etc HDFSPROXY_JT_OPTS applies to JobTracker
+# for e.g. HDFSPROXY_CLIENT_OPTS applies to
+# more than one command (fs, dfs, fsck,
+# dfsadmin etc)
+#
+# HDFSPROXY_CONF_DIR Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
+#
+# HDFSPROXY_ROOT_LOGGER The root appender. Default is INFO,console
+#
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+cygwin=false
+case "`uname`" in
+CYGWIN*) cygwin=true;;
+esac
+
+if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
+ . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$HDFSPROXY_HEAPSIZE" != "" ]; then
+ #echo "run with heapsize $HDFSPROXY_HEAPSIZE"
+ JAVA_HEAP_MAX="-Xmx""$HDFSPROXY_HEAPSIZE""m"
+ #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $HDFSPROXY_CONF_DIR
+CLASSPATH="${HDFSPROXY_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+# for developers, add HdfsProxy classes to CLASSPATH
+if [ -d "$HDFSPROXY_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/classes
+fi
+if [ -d "$HDFSPROXY_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build
+fi
+if [ -d "$HDFSPROXY_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/test/classes
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# for releases, add hdfsproxy jar & webapps to CLASSPATH
+if [ -d "$HDFSPROXY_HOME/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME
+fi
+for f in $HDFSPROXY_HOME/hdfsproxy-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+if [ -d "$HDFSPROXY_HOME/lib" ]; then
+ for f in $HDFSPROXY_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+
+if [ -d "$HDFSPROXY_HOME/../../" ]; then
+ for f in $HDFSPROXY_HOME/../../*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+if [ -d "$HDFSPROXY_HOME/../../lib" ]; then
+ for f in $HDFSPROXY_HOME/../../lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+if [ -d "$HDFSPROXY_HOME/../../lib/jsp-2.1" ]; then
+ for f in $HDFSPROXY_HOME/../../lib/jsp-2.1/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+
+
+# add user-specified CLASSPATH last
+if [ "$HDFSPROXY_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${HDFSPROXY_CLASSPATH}
+fi
+
+# default log directory & file
+if [ "$HDFSPROXY_LOG_DIR" = "" ]; then
+ HDFSPROXY_LOG_DIR="$HDFSPROXY_HOME/logs"
+fi
+if [ "$HDFSPROXY_LOGFILE" = "" ]; then
+ HDFSPROXY_LOGFILE='hdfsproxy.log'
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+# figure out which class to run
+CLASS='org.apache.hadoop.hdfsproxy.HdfsProxy'
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ HDFSPROXY_HOME=`cygpath -d "$HDFSPROXY_HOME"`
+ HDFSPROXY_LOG_DIR=`cygpath -d "$HDFSPROXY_LOG_DIR"`
+fi
+
+# cygwin path translation
+if $cygwin; then
+ JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
+fi
+
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.dir=$HDFSPROXY_LOG_DIR"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.file=$HDFSPROXY_LOGFILE"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.home.dir=$HDFSPROXY_HOME"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.id.str=$HDFSPROXY_IDENT_STRING"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.root.logger=${HDFSPROXY_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+
+# run it
+exec "$JAVA" $JAVA_HEAP_MAX $HDFSPROXY_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh
new file mode 100755
index 0000000000..8fe6aac68b
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# resolve links - $0 may be a softlink
+
+this="$0"
+while [ -h "$this" ]; do
+ ls=`ls -ld "$this"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '.*/.*' > /dev/null; then
+ this="$link"
+ else
+ this=`dirname "$this"`/"$link"
+ fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin"; pwd`
+this="$bin/$script"
+
+# the root of the HdfsProxy installation
+export HDFSPROXY_HOME=`dirname "$this"`/..
+
+#check to see if the conf dir is given as an optional argument
+if [ $# -gt 1 ]
+then
+ if [ "--config" = "$1" ]
+ then
+ shift
+ confdir=$1
+ shift
+ HDFSPROXY_CONF_DIR=$confdir
+ fi
+fi
+
+# Allow alternate conf dir location.
+HDFSPROXY_CONF_DIR="${HDFSPROXY_CONF_DIR:-$HDFSPROXY_HOME/conf}"
+
+#check to see it is specified whether to use the slaves file
+if [ $# -gt 1 ]
+then
+ if [ "--hosts" = "$1" ]
+ then
+ shift
+ slavesfile=$1
+ shift
+ export HDFSPROXY_SLAVES="${HDFSPROXY_CONF_DIR}/$slavesfile"
+ fi
+fi
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh
new file mode 100755
index 0000000000..6d5a75247f
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a HdfsProxy as a daemon.
+#
+# Environment Variables
+#
+# HDFSPROXY_CONF_DIR Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
+# HDFSPROXY_LOG_DIR Where log files are stored. PWD by default.
+# HDFSPROXY_MASTER host:path where hdfsproxy code should be rsync'd from
+# HDFSPROXY_PID_DIR The pid files are stored. /tmp by default.
+# HDFSPROXY_IDENT_STRING A string representing this instance of hdfsproxy. $USER by default
+# HDFSPROXY_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: hdfsproxy-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# get arguments
+startStop=$1
+shift
+
+hdfsproxy_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
+ . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
+fi
+
+# get log directory
+if [ "$HDFSPROXY_LOG_DIR" = "" ]; then
+ export HDFSPROXY_LOG_DIR="$HDFSPROXY_HOME/logs"
+fi
+mkdir -p "$HDFSPROXY_LOG_DIR"
+
+if [ "$HDFSPROXY_PID_DIR" = "" ]; then
+ HDFSPROXY_PID_DIR=/tmp
+fi
+
+if [ "$HDFSPROXY_IDENT_STRING" = "" ]; then
+ export HDFSPROXY_IDENT_STRING="$USER"
+fi
+
+# some variables
+export HDFSPROXY_LOGFILE=hdfsproxy-$HDFSPROXY_IDENT_STRING-$HOSTNAME.log
+export HDFSPROXY_ROOT_LOGGER="INFO,DRFA"
+log=$HDFSPROXY_LOG_DIR/hdfsproxy-$HDFSPROXY_IDENT_STRING-$HOSTNAME.out
+pid=$HDFSPROXY_PID_DIR/hdfsproxy-$HDFSPROXY_IDENT_STRING.pid
+
+# Set default scheduling priority
+if [ "$HDFSPROXY_NICENESS" = "" ]; then
+ export HDFSPROXY_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ mkdir -p "$HDFSPROXY_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo hdfsproxy running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$HDFSPROXY_MASTER" != "" ]; then
+ echo rsync from $HDFSPROXY_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HDFSPROXY_MASTER/ "$HDFSPROXY_HOME"
+ fi
+
+ hdfsproxy_rotate_log $log
+ echo starting hdfsproxy, logging to $log
+ cd "$HDFSPROXY_HOME"
+ nohup nice -n $HDFSPROXY_NICENESS "$HDFSPROXY_HOME"/bin/hdfsproxy --config $HDFSPROXY_CONF_DIR "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1; head "$log"
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo stopping hdfsproxy
+ kill `cat $pid`
+ else
+ echo no hdfsproxy to stop
+ fi
+ else
+ echo no hdfsproxy to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh
new file mode 100755
index 0000000000..7dd8568a3b
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a HdfsProxy command on all slave hosts.
+
+usage="Usage: hdfsproxy-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. $bin/hdfsproxy-config.sh
+
+exec "$bin/hdfsproxy-slaves.sh" --config $HDFSPROXY_CONF_DIR cd "$HDFSPROXY_HOME" \; "$bin/hdfsproxy-daemon.sh" --config $HDFSPROXY_CONF_DIR "$@"
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh
new file mode 100755
index 0000000000..db54bd5b38
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a shell command on all slave hosts.
+#
+# Environment Variables
+#
+# HDFSPROXY_SLAVES File naming remote hosts.
+# Default is ${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts.
+# HDFSPROXY_CONF_DIR Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
+# HDFSPROXY_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HDFSPROXY_SSH_OPTS Options passed to ssh when running remote commands.
+##
+
+usage="Usage: hdfsproxy-slaves.sh [--config confdir] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# If the slaves file is specified in the command line,
+# then it takes precedence over the definition in
+# hdfsproxy-env.sh. Save it here.
+HOSTLIST=$HDFSPROXY_SLAVES
+
+if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
+ . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
+fi
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$HDFSPROXY_SLAVES" = "" ]; then
+ export HOSTLIST="${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts"
+ else
+ export HOSTLIST="${HDFSPROXY_SLAVES}"
+ fi
+fi
+
+for slave in `cat "$HOSTLIST"`; do
+ ssh $HDFSPROXY_SSH_OPTS $slave $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$slave: /" &
+ if [ "$HDFSPROXY_SLAVE_SLEEP" != "" ]; then
+ sleep $HDFSPROXY_SLAVE_SLEEP
+ fi
+done
+
+wait
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh
new file mode 100755
index 0000000000..2592d9c8cc
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hdfsproxy daemons.
+# Run this on master node.
+
+usage="Usage: start-hdfsproxy.sh"
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+# start hdfsproxy daemons
+# "$bin"/hdfsproxy-daemon.sh --config $HDFSPROXY_CONF_DIR start
+"$bin"/hdfsproxy-daemons.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts start
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh
new file mode 100755
index 0000000000..78089e31cf
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop hdfsproxy daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# "$bin"/hdfsproxy-daemon.sh --config $HDFSPROXY_CONF_DIR stop
+"$bin"/hdfsproxy-daemons.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts stop
+
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml
new file mode 100644
index 0000000000..e62b2f279a
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<project name="hdfsproxy" default="jar">
+ <property name="hdfsproxyVersion" value="1.0"/>
+ <property name="final.name" value="${ant.project.name}-${hdfsproxyVersion}"/>
+ <property name="bin.dir" value="${basedir}/bin"/>
+ <property name="lib.dir" value="${basedir}/lib"/>
+ <property name="conf.dir" value="${basedir}/conf"/>
+ <property name="docs.dir" value="${basedir}/docs"/>
+ <import file="../build-contrib.xml"/>
+
+ <target name="jar" depends="compile" description="Create jar">
+ <echo>
+ Building the .jar files.
+ </echo>
+ <jar jarfile="${build.dir}/${final.name}.jar" basedir="${build.classes}" includes="org/apache/hadoop/hdfsproxy/**/*.class" >
+ <manifest>
+ <section name="org/apache/hadoop/hdfsproxy">
+ <attribute name="Implementation-Title" value="HdfsProxy"/>
+ <attribute name="Implementation-Version" value="${hdfsproxyVersion}"/>
+ <attribute name="Implementation-Vendor" value="Apache"/>
+ </section>
+ </manifest>
+
+ </jar>
+ </target>
+
+ <!-- ====================================================== -->
+ <!-- Macro definitions -->
+ <!-- ====================================================== -->
+ <macrodef name="macro_tar" description="Worker Macro for tar">
+ <attribute name="param.destfile"/>
+ <element name="param.listofitems"/>
+ <sequential>
+ <tar compression="gzip" longfile="gnu"
+ destfile="@{param.destfile}">
+ <param.listofitems/>
+ </tar>
+ </sequential>
+ </macrodef>
+
+ <!-- ================================================================== -->
+ <!-- D I S T R I B U T I O N -->
+ <!-- ================================================================== -->
+ <!-- -->
+ <!-- ================================================================== -->
+ <target name="local-package" depends="jar" description="Package in local build directory">
+ <mkdir dir="${build.dir}/${final.name}"/>
+ <mkdir dir="${build.dir}/${final.name}/logs"/>
+ <copy todir="${build.dir}/${final.name}" includeEmptyDirs="false">
+ <fileset dir="${build.dir}">
+ <include name="*.jar" />
+ <include name="*.war" />
+ </fileset>
+ </copy>
+ <copy todir="${build.dir}/${final.name}/lib" includeEmptyDirs="false">
+ <fileset dir="${common.ivy.lib.dir}">
+ <include name="commons-logging-${commons-logging.version}"/>
+ <include name="commons-logging-api-${commons-logging-api.version}.jar"/>
+ <include name="junit-${junit.version}.jar"/>
+ <include name="log4j-${log4j.version}.jar"/>
+ <include name="slf4j-api-${slf4j-api.version}.jar"/>
+ <include name="slf4j-log4j${slf4j-log4j.version}.jar"/>
+ <include name="xmlenc-${xmlenc.version}.jar"/>
+ <include name="jetty-${jetty.version}.jar"/>
+ <include name="servlet-api-${servlet-api-2.5.version}.jar"/>
+ <include name="core-${core.vesion}"/>
+ </fileset>
+ <fileset dir="${hadoop.root}/lib/jsp-${jsp.version}">
+ <include name="jsp-${jsp.version}"/>
+ <include name="jsp-api-${jsp-api.vesion}"/>
+ </fileset>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/lib" includeEmptyDirs="false">
+ <fileset dir="${hadoop.root}/build">
+ <include name="*-core.jar"/>
+ <include name="*-tools.jar"/>
+ </fileset>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/bin">
+ <fileset dir="${bin.dir}"/>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/conf">
+ <fileset dir="${conf.dir}"/>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}">
+ <fileset dir="${basedir}">
+ <include name="README" />
+ <include name="build.xml" />
+ <include name="*.txt" />
+ </fileset>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/src" includeEmptyDirs="true">
+ <fileset dir="${src.dir}" excludes="**/*.template **/docs/build/**/*"/>
+ </copy>
+
+ <chmod perm="ugo+x" type="file" parallel="false">
+ <fileset dir="${build.dir}/${final.name}/bin"/>
+ </chmod>
+
+ </target>
+
+ <target name="package" depends="local-package" description="Build distribution">
+ <mkdir dir="${dist.dir}/contrib/${name}"/>
+ <copy todir="${dist.dir}/contrib/${name}">
+ <fileset dir="${build.dir}/${final.name}">
+ <exclude name="**/lib/**" />
+ <exclude name="**/src/**" />
+ </fileset>
+ </copy>
+ <chmod dir="${dist.dir}/contrib/${name}/bin" perm="a+x" includes="*"/>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Make release tarball -->
+ <!-- ================================================================== -->
+ <target name="tar" depends="local-package" description="Make release tarball">
+ <macro_tar param.destfile="${build.dir}/${final.name}.tar.gz">
+ <param.listofitems>
+ <tarfileset dir="${build.dir}" mode="664">
+ <exclude name="${final.name}/bin/*" />
+ <include name="${final.name}/**" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}" mode="755">
+ <include name="${final.name}/bin/*" />
+ </tarfileset>
+ </param.listofitems>
+ </macro_tar>
+ </target>
+
+ <target name="binary" depends="local-package" description="Make tarball without source and documentation">
+ <macro_tar param.destfile="${build.dir}/${final.name}-bin.tar.gz">
+ <param.listofitems>
+ <tarfileset dir="${build.dir}" mode="664">
+ <exclude name="${final.name}/bin/*" />
+ <exclude name="${final.name}/src/**" />
+ <exclude name="${final.name}/docs/**" />
+ <include name="${final.name}/**" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}" mode="755">
+ <include name="${final.name}/bin/*" />
+ </tarfileset>
+ </param.listofitems>
+ </macro_tar>
+ </target>
+
+ <!-- the unit test classpath -->
+ <path id="test.classpath">
+ <pathelement location="${build.test}" />
+ <pathelement location="${hadoop.root}/build/test/classes"/>
+ <pathelement location="${hadoop.root}/src/contrib/test"/>
+ <pathelement location="${hadoop.root}/conf"/>
+ <pathelement location="${hadoop.root}/build"/>
+ <pathelement location="${hadoop.root}/build/classes"/>
+ <pathelement location="${hadoop.root}/build/tools"/>
+ <pathelement location="${build.examples}"/>
+ <path refid="contrib-classpath"/>
+ </path>
+
+
+</project>
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl
new file mode 100644
index 0000000000..377cdbeb93
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+ <td><xsl:value-of select="value"/></td>
+ <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml
new file mode 100644
index 0000000000..0d2a006c8e
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put hdfsproxy specific properties in this file. -->
+
+<configuration>
+
+<property>
+ <name>hdfsproxy.https.address</name>
+ <value>0.0.0.0:50479</value>
+ <description>the SSL port that hdfsproxy listens on
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.hosts</name>
+ <value>hdfsproxy-hosts</value>
+ <description>location of hdfsproxy-hosts file
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.dfs.namenode.address</name>
+ <value></value>
+ <description>namenode address of the HDFS cluster being proxied
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.https.server.keystore.resource</name>
+ <value>ssl-server.xml</value>
+ <description>location of the resource from which ssl server keystore
+ information will be extracted
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.user.permissions.file.location</name>
+ <value>user-permissions.xml</value>
+ <description>location of the user permissions file
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.user.certs.file.location</name>
+ <value>user-certs.xml</value>
+ <description>location of the user certs file
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.ugi.cache.ugi.lifetime</name>
+ <value>15</value>
+ <description> The lifetime (in minutes) of a cached ugi
+ </description>
+</property>
+
+</configuration>
+
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh
new file mode 100644
index 0000000000..a0ff7a5d27
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh
@@ -0,0 +1,44 @@
+# Set HdfsProxy-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+# Extra Java CLASSPATH elements. Optional.
+# export HDFSPROXY_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HDFSPROXY_HEAPSIZE=2000
+
+# Extra Java runtime options. Empty by default.
+# export HDFSPROXY_OPTS=
+
+# Extra ssh options. Empty by default.
+# export HDFSPROXY_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HDFSPROXY_CONF_DIR"
+
+# Where log files are stored. $HDFSPROXY_HOME/logs by default.
+# export HDFSPROXY_LOG_DIR=${HDFSPROXY_HOME}/logs
+
+# File naming remote slave hosts. $HDFSPROXY_HOME/conf/slaves by default.
+# export HDFSPROXY_SLAVES=${HDFSPROXY_HOME}/conf/slaves
+
+# host:path where hdfsproxy code should be rsync'd from. Unset by default.
+# export HDFSPROXY_MASTER=master:/home/$USER/src/hdfsproxy
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HDFSPROXY_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+# export HDFSPROXY_PID_DIR=/var/hdfsproxy/pids
+
+# A string representing this instance of hdfsproxy. $USER by default.
+# export HDFSPROXY_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HDFSPROXY_NICENESS=10
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template
new file mode 100644
index 0000000000..a0ff7a5d27
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template
@@ -0,0 +1,44 @@
+# Set HdfsProxy-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+# Extra Java CLASSPATH elements. Optional.
+# export HDFSPROXY_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HDFSPROXY_HEAPSIZE=2000
+
+# Extra Java runtime options. Empty by default.
+# export HDFSPROXY_OPTS=
+
+# Extra ssh options. Empty by default.
+# export HDFSPROXY_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HDFSPROXY_CONF_DIR"
+
+# Where log files are stored. $HDFSPROXY_HOME/logs by default.
+# export HDFSPROXY_LOG_DIR=${HDFSPROXY_HOME}/logs
+
+# File naming remote slave hosts. $HDFSPROXY_HOME/conf/slaves by default.
+# export HDFSPROXY_SLAVES=${HDFSPROXY_HOME}/conf/slaves
+
+# host:path where hdfsproxy code should be rsync'd from. Unset by default.
+# export HDFSPROXY_MASTER=master:/home/$USER/src/hdfsproxy
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HDFSPROXY_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+# export HDFSPROXY_PID_DIR=/var/hdfsproxy/pids
+
+# A string representing this instance of hdfsproxy. $USER by default.
+# export HDFSPROXY_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HDFSPROXY_NICENESS=10
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts
new file mode 100644
index 0000000000..2fbb50c4a8
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts
@@ -0,0 +1 @@
+localhost
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties
new file mode 100644
index 0000000000..2520ab3795
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties
@@ -0,0 +1,61 @@
+# Define some default values that can be overridden by system properties
+hdfsproxy.root.logger=INFO,console
+hdfsproxy.log.dir=.
+hdfsproxy.log.file=hdfsproxy.log
+
+# Define the root logger to the system property "hdfsproxy.root.logger".
+log4j.rootLogger=${hdfsproxy.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.hdfsproxy.HttpsProxy=DEBUG
+#log4j.logger.org.apache.hadoop.hdfsproxy.ProxyFilter=DEBUG
+
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml
new file mode 100644
index 0000000000..f572a55294
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+
+This file defines the mappings from username to a comma-separated list
+of certificate serial numbers that the user is allowed to use. One mapping
+per user. Wildcard characters, such as "*" and "?", are not recognized.
+Any leading or trailing whitespace is stripped/ignored. Note that user
+"Admin" is the special hdfsproxy admin user. To make a user an admin, add
+the user's certificate serial number to user "Admin". Normal users cannot
+have "Admin" as username. Usernames may consist only of 0-9a-zA-Z and
+underscore.
+
+-->
+
+<configuration>
+
+<property>
+ <name>Admin</name>
+ <value></value>
+ <description> Special hdfsproxy admin user
+ </description>
+</property>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml
new file mode 100644
index 0000000000..b7373751bd
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+
+This file defines the mappings from username to a comma-separated list
+of directories/files that the user is allowed to use. One mapping
+per user. Wildcard characters, such as "*" and "?", are not recognized.
+For example, to match the "/output" directory, one can use "/output" or
+"/output/", but not "/output/*". Any leading or trailing whitespace
+in the name field is stripped/ignored, while only leading whitespace
+in the value field is. Note that the special hdfsproxy admin user "Admin"
+doesn't automatically have access to any files, unless explicitly
+specified in this file. Usernames may consist only of 0-9a-zA-Z and
+underscore.
+
+-->
+
+<configuration>
+
+<property>
+ <name></name>
+ <value></value>
+ <description>
+ </description>
+</property>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar b/lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar
new file mode 100644
index 0000000000..a313391dfb
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar
Binary files differ