# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
21 echo "Usage: ${0} [options] /path/to/component/bin-install /path/to/hadoop/executable /path/to/share/hadoop/yarn/timelineservice /path/to/hadoop/hadoop-yarn-server-tests-tests.jar /path/to/hadoop/hadoop-mapreduce-client-jobclient-tests.jar /path/to/mapred/executable"
23 echo " --zookeeper-data /path/to/use Where the embedded zookeeper instance should write its data."
24 echo " defaults to 'zk-data' in the working-dir."
25 echo " --working-dir /path/to/use Path for writing configs and logs. must exist."
26 echo " defaults to making a directory via mktemp."
27 echo " --hadoop-client-classpath /path/to/some.jar:/path/to/another.jar classpath for hadoop jars."
28 echo " defaults to 'hadoop classpath'"
29 echo " --hbase-client-install /path/to/unpacked/client/tarball if given we'll look here for hbase client jars instead of the bin-install"
30 echo " --force-data-clean Delete all data in HDFS and ZK prior to starting up hbase"
31 echo " --single-process Run as single process instead of pseudo-distributed"
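# Illustrative invocation (all paths below are placeholders, not real defaults):
#   ${0} --single-process \
#       /tmp/hbase-install /opt/hadoop/bin/hadoop /opt/hadoop/share/hadoop/yarn/timelineservice \
#       /opt/hadoop/share/hadoop/yarn/test/hadoop-yarn-server-tests-<version>-tests.jar \
#       /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-<version>-tests.jar \
#       /opt/hadoop/bin/mapred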
# if no args specified, show usage
declare component_install
declare distributed="true"
    --working-dir) shift; working_dir=$1; shift;;
    --force-data-clean) shift; clean="true";;
    --zookeeper-data) shift; zk_data_dir=$1; shift;;
    --single-process) shift; distributed="false";;
    --hadoop-client-classpath) shift; hadoop_jars="$1"; shift;;
    --hbase-client-install) shift; hbase_client="$1"; shift;;
    *) break;;  # terminate while loop
# should still have where component checkout is.
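# Resolve each positional argument to an absolute path so later steps can change
# directories without breaking relative references.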
component_install="$(cd "$(dirname "$1")"; pwd)/$(basename "$1")"
hadoop_exec="$(cd "$(dirname "$2")"; pwd)/$(basename "$2")"
timeline_service_dir="$(cd "$(dirname "$3")"; pwd)/$(basename "$3")"
yarn_server_tests_test_jar="$(cd "$(dirname "$4")"; pwd)/$(basename "$4")"
mapred_jobclient_test_jar="$(cd "$(dirname "$5")"; pwd)/$(basename "$5")"
mapred_exec="$(cd "$(dirname "$6")"; pwd)/$(basename "$6")"
if [ ! -x "${hadoop_exec}" ]; then
  echo "hadoop cli does not appear to be executable." >&2

if [ ! -x "${mapred_exec}" ]; then
  echo "mapred cli does not appear to be executable." >&2

if [ ! -d "${component_install}" ]; then
  echo "Path to HBase binary install should be a directory." >&2

if [ ! -f "${yarn_server_tests_test_jar}" ]; then
  echo "Specified YARN server tests test jar is not a file." >&2

if [ ! -f "${mapred_jobclient_test_jar}" ]; then
  echo "Specified MapReduce jobclient test jar is not a file." >&2
if [ -z "${working_dir}" ]; then
  if ! working_dir="$(mktemp -d -t hbase-pseudo-dist-test)" ; then
    echo "Failed to create temporary working directory. Please specify via --working-dir" >&2

  working_dir="$(cd "$(dirname "${working_dir}")"; pwd)/$(basename "${working_dir}")"
  if [ ! -d "${working_dir}" ]; then
    echo "passed working directory '${working_dir}' must already exist." >&2
if [ -z "${zk_data_dir}" ]; then
  zk_data_dir="${working_dir}/zk-data"
  mkdir "${zk_data_dir}"

  zk_data_dir="$(cd "$(dirname "${zk_data_dir}")"; pwd)/$(basename "${zk_data_dir}")"
  if [ ! -d "${zk_data_dir}" ]; then
    echo "passed zookeeper data directory '${zk_data_dir}' must already exist." >&2
if [ -z "${hbase_client}" ]; then
  hbase_client="${component_install}"

  echo "Using HBase client-side artifact"
  hbase_client="$(cd "$(dirname "${hbase_client}")"; pwd)/$(basename "${hbase_client}")"
  if [ ! -d "${hbase_client}" ]; then
    echo "If given hbase client install should be a directory with contents of the client tarball." >&2
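# If the caller passed --hadoop-client-classpath, normalize each ':'-separated entry
# to an absolute path and re-join them into a single classpath string.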
if [ -n "${hadoop_jars}" ]; then
  for entry in $(echo "${hadoop_jars}" | tr ':' '\n'); do
    tmp_jars=("${tmp_jars[@]}" "$(cd "$(dirname "${entry}")"; pwd)/$(basename "${entry}")")
  hadoop_jars="$(IFS=:; echo "${tmp_jars[*]}")"
147 echo "You
'll find logs and temp files in ${working_dir}"
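# redirect_and_run <log-base> <command...>
# Records the command line in <log-base>.err, then runs the command with stdout
# going to <log-base>.out and stderr appended to <log-base>.err.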
function redirect_and_run {
  log_base=$1
  shift
  echo "$*" >"${log_base}.err"
  "$@" >"${log_base}.out" 2>>"${log_base}.err"
echo "Hadoop version information:"
"${hadoop_exec}" version
hadoop_version=$("${hadoop_exec}" version | head -n 1)
hadoop_version="${hadoop_version#Hadoop }"
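# ${hadoop_version%.*.*} strips the minor and patch components, leaving the major
# version for comparison (e.g. "3.3.6" becomes "3").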
if [ "${hadoop_version%.*.*}" -gt 2 ]; then
  "${hadoop_exec}" envvars
  echo "JAVA_HOME: ${JAVA_HOME}"
# Ensure that if some other Hadoop install happens to be present in the environment we ignore it.
HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP="true"
export HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP
if [ -n "${clean}" ]; then
  echo "Cleaning out ZooKeeper..."
  rm -rf "${zk_data_dir:?}"/*
echo "HBase version information:"
"${component_install}/bin/hbase" version 2>/dev/null
hbase_version=$("${component_install}/bin/hbase" version 2>&1 | grep ^HBase | head -n 1)
hbase_version="${hbase_version#HBase }"
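# The rest of this test exercises the shaded artifacts shipped under lib/shaded-clients,
# so fail fast if the install we were pointed at does not include them.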
if [ ! -s "${hbase_client}/lib/shaded-clients/hbase-shaded-mapreduce-${hbase_version}.jar" ]; then
  echo "HBase binary install doesn't appear to include a shaded mapreduce artifact." >&2

if [ ! -s "${hbase_client}/lib/shaded-clients/hbase-shaded-client-${hbase_version}.jar" ]; then
  echo "HBase binary install doesn't appear to include a shaded client artifact." >&2
if [ ! -s "${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar" ]; then
  echo "HBase binary install doesn't appear to include a shaded client (byo-hadoop) artifact." >&2
echo "Writing out configuration for HBase."
rm -rf "${working_dir}/hbase-conf"
mkdir "${working_dir}/hbase-conf"

if [ -f "${component_install}/conf/log4j2.properties" ]; then
  cp "${component_install}/conf/log4j2.properties" "${working_dir}/hbase-conf/log4j2.properties"
  cat >"${working_dir}/hbase-conf/log4j2.properties" <<EOF
name = PropertiesConfig

appender.console.type = Console
appender.console.target = SYSTEM_ERR
appender.console.name = Console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t] %c{2}: %.1000m%n
rootLogger = \${sys:hbase.root.logger:-INFO,console}
cat >"${working_dir}/hbase-conf/hbase-site.xml" <<EOF
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
    <name>hbase.rootdir</name>
    <!-- We rely on the defaultFS being set in our hadoop confs -->
    <value>/hbase</value>

    <name>hbase.zookeeper.property.dataDir</name>
    <value>${zk_data_dir}</value>

    <name>hbase.cluster.distributed</name>
    <value>${distributed}</value>
259 if [ "true
" = "${distributed}" ]; then
260 cat >"${working_dir}/hbase-conf
/regionservers
" <<EOF
function cleanup {

  echo "Shutting down HBase"
  HBASE_CONF_DIR="${working_dir}/hbase-conf/" "${component_install}/bin/stop-hbase.sh"

  if [ -f "${working_dir}/hadoop.pid" ]; then
    echo "Shutdown: listing HDFS contents"
    redirect_and_run "${working_dir}/hadoop_listing_at_end" \
        "${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -ls -R /

    echo "Shutting down Hadoop"
    kill -6 "$(cat "${working_dir}/hadoop.pid")"
  fi
}
trap cleanup EXIT SIGQUIT
echo "Starting up Hadoop"
if [ "${hadoop_version%.*.*}" -gt 2 ]; then
  "${mapred_exec}" minicluster -format -writeConfig "${working_dir}/hbase-conf/core-site.xml" -writeDetails "${working_dir}/hadoop_cluster_info.json" >"${working_dir}/hadoop_cluster_command.out" 2>"${working_dir}/hadoop_cluster_command.err" &
  HADOOP_CLASSPATH="${timeline_service_dir}/*:${timeline_service_dir}/lib/*:${yarn_server_tests_test_jar}" "${hadoop_exec}" jar "${mapred_jobclient_test_jar}" minicluster -format -writeConfig "${working_dir}/hbase-conf/core-site.xml" -writeDetails "${working_dir}/hadoop_cluster_info.json" >"${working_dir}/hadoop_cluster_command.out" 2>"${working_dir}/hadoop_cluster_command.err" &
290 echo "$
!" > "${working_dir}/hadoop.pid
"
# 2 + 4 + 8 + .. + 256 ~= 8.5 minutes.
until [[ -s "${working_dir}/hbase-conf/core-site.xml" || "${sleep_time}" -ge "${max_sleep_time}" ]]; do
  printf '\twaiting for Hadoop to finish starting up.\n'
  sleep "${sleep_time}"
  sleep_time="$((sleep_time*2))"
if [ "${sleep_time}" -ge "${max_sleep_time}" ] ; then
  echo "timed out waiting for Hadoop to start up" >&2
if [ "${hadoop_version%.*.*}" -gt 2 ]; then
  echo "Verifying configs"
  for f in "${working_dir}"/hbase-conf/*-site.xml; do
    hadoop_conf_files="$hadoop_conf_files -conffile $f"
  "${hadoop_exec}" --config "${working_dir}/hbase-conf/" conftest $hadoop_conf_files
if [ -n "${clean}" ]; then
  echo "Cleaning out HDFS..."
  "${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -rm -r /hbase
  "${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -rm -r example/
  "${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -rm -r example-region-listing.data
echo "Listing HDFS contents"
redirect_and_run "${working_dir}/hadoop_cluster_smoke" \
    "${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -ls -R /
echo "Starting up HBase"
HBASE_CONF_DIR="${working_dir}/hbase-conf/" "${component_install}/bin/start-hbase.sh"
until "${component_install}/bin/hbase" --config "${working_dir}/hbase-conf/" shell --noninteractive >"${working_dir}/waiting_hbase_startup.log" 2>&1 <<EOF
  printf '\tretry waiting for hbase to come up.\n'
  sleep "${sleep_time}"
  sleep_time="$((sleep_time*2))"
echo "Setting up table 'test:example' with 1,000 regions"
"${hbase_client}/bin/hbase" --config "${working_dir}/hbase-conf/" shell --noninteractive >"${working_dir}/table_create.log" 2>&1 <<EOF
create_namespace 'test'
create 'test:example', 'family1', 'family2', {NUMREGIONS => 1000, SPLITALGO => 'UniformSplit'}
echo "writing out example TSV to example.tsv"
cat >"${working_dir}/example.tsv" <<EOF
echo "uploading example.tsv to HDFS"
"${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -mkdir example
"${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -copyFromLocal "${working_dir}/example.tsv" "example/"
echo "Importing TSV via shaded client artifact for HBase - MapReduce integration."
# hbase_thirdparty_jars=("${component_install}"/lib/htrace-core4*.jar \
#     "${component_install}"/lib/slf4j-api-*.jar \
#     "${component_install}"/lib/commons-logging-*.jar \
#     "${component_install}"/lib/slf4j-log4j12-*.jar \
#     "${component_install}"/lib/log4j-1.2.*.jar \
#     "${working_dir}/hbase-conf/log4j.properties")
# hbase_dep_classpath=$(IFS=:; echo "${hbase_thirdparty_jars[*]}")
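# 'hbase mapredcp' prints the minimal classpath of HBase dependencies needed by MapReduce jobs.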
hbase_dep_classpath="$("${hbase_client}/bin/hbase" --config "${working_dir}/hbase-conf/" mapredcp)"
HADOOP_CLASSPATH="${hbase_dep_classpath}" redirect_and_run "${working_dir}/mr-importtsv" \
    "${hadoop_exec}" --config "${working_dir}/hbase-conf/" jar "${hbase_client}/lib/shaded-clients/hbase-shaded-mapreduce-${hbase_version}.jar" importtsv -Dimporttsv.columns=HBASE_ROW_KEY,family1:column1,family1:column4,family1:column3 test:example example/ -libjars "${hbase_dep_classpath}"
412 "${hbase_client}/bin/hbase" --config "${working_dir}/hbase-conf/" shell --noninteractive >"${working_dir}/scan_import.out" 2>"${working_dir}/scan_import.err
" <<EOF
echo "Verifying row count from import."
import_rowcount=$(echo 'count "test:example"' | "${hbase_client}/bin/hbase" --config "${working_dir}/hbase-conf/" shell --noninteractive 2>/dev/null | tail -n 1)
if [ ! "${import_rowcount}" -eq 48 ]; then
  echo "ERROR: Instead of finding 48 rows, we found ${import_rowcount}."
if [ -z "${hadoop_jars}" ]; then
  echo "Hadoop client jars not given; getting them from 'hadoop classpath' for the example."
  hadoop_jars=$("${hadoop_exec}" --config "${working_dir}/hbase-conf/" classpath)
echo "Building shaded client example."
cat >"${working_dir}/HBaseClientReadWriteExample.java" <<EOF
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.LinkedList;
import java.util.List;
public class HBaseClientReadWriteExample {
  private static final byte[] FAMILY_BYTES = Bytes.toBytes("family2");
  public static void main(String[] args) throws Exception {
    Configuration hbase = HBaseConfiguration.create();
    Configuration hadoop = new Configuration();
    try (Connection connection = ConnectionFactory.createConnection(hbase)) {
      System.out.println("Generating list of regions");
      final List<String> regions = new LinkedList<>();
      try (Admin admin = connection.getAdmin()) {
        final ClusterMetrics cluster = admin.getClusterMetrics();
        System.out.println(String.format("\tCluster reports version %s, ave load %f, region count %d", cluster.getHBaseVersion(), cluster.getAverageLoad(), cluster.getRegionCount()));
        for (ServerMetrics server : cluster.getLiveServerMetrics().values()) {
          for (RegionMetrics region : server.getRegionMetrics().values()) {
            regions.add(region.getNameAsString());
      final Path listing = new Path("example-region-listing.data");
      System.out.println("Writing list to HDFS");
      try (FileSystem fs = FileSystem.newInstance(hadoop)) {
        final Path path = fs.makeQualified(listing);
        try (FSDataOutputStream out = fs.create(path)) {
          out.writeInt(regions.size());
          for (String region : regions) {
            out.writeUTF(region);
      final List<Put> puts = new LinkedList<>();
      final Put marker = new Put(new byte[] { (byte)0 });
      System.out.println("Reading list from HDFS");
      try (FileSystem fs = FileSystem.newInstance(hadoop)) {
        final Path path = fs.makeQualified(listing);
        final CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
        try (FSDataInputStream in = fs.open(path)) {
          final int count = in.readInt();
          marker.addColumn(FAMILY_BYTES, Bytes.toBytes("count"), Bytes.toBytes(count));
          for (int i = 0; i < count; i++) {
            final byte[] row = Bytes.toBytes(in.readUTF());
            final Put put = new Put(row);
            builder.setRow(row);
            builder.setFamily(FAMILY_BYTES);
            builder.setType(Cell.Type.Put);
            put.add(builder.build());
      System.out.println("Writing list into HBase table");
      try (Table table = connection.getTable(TableName.valueOf("test:example"))) {
redirect_and_run "${working_dir}/hbase-shaded-client-compile" \
    javac -cp "${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hadoop_jars}" "${working_dir}/HBaseClientReadWriteExample.java"
echo "Running shaded client example. It'll fetch the set of regions, round-trip them to a file in HDFS, then write them one-per-row into the test table."
# The order of classpath entries here is important. If we're using non-shaded Hadoop 3 / 2.9.0 jars, we have to work around YARN-2190.
redirect_and_run "${working_dir}/hbase-shaded-client-example" \
    java -cp "${working_dir}/hbase-conf/:${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hbase_dep_classpath}:${working_dir}:${hadoop_jars}" HBaseClientReadWriteExample
echo "Checking on results of example program."
"${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -copyToLocal "example-region-listing.data" "${working_dir}/example-region-listing.data"

"${hbase_client}/bin/hbase" --config "${working_dir}/hbase-conf/" shell --noninteractive >"${working_dir}/scan_example.out" 2>"${working_dir}/scan_example.err" <<EOF
echo "Verifying row count from example."
example_rowcount=$(echo 'count "test:example"' | "${hbase_client}/bin/hbase" --config "${working_dir}/hbase-conf/" shell --noninteractive 2>/dev/null | tail -n 1)
if [ "${example_rowcount}" -gt "1049" ]; then
  echo "Found ${example_rowcount} rows, which is enough to cover the 48 rows from the import, 1,000 rows for the example's use of user table regions, 1 row for the example's use of the meta region, and 1 row for the example's count record."
  echo "ERROR: Only found ${example_rowcount} rows."