/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.TimestampTestBase.FlushCache;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Port of old TestScanMultipleVersions, TestTimestamp and TestGetRowVersions
 * from old testing framework to {@link HBaseTestingUtil}.
 */
@Category({ MiscTests.class, MediumTests.class })
public class TestMultiVersions {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMultiVersions.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestMultiVersions.class);
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
  private Admin admin;

  private static final int NUM_SLAVES = 3;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    UTIL.startMiniCluster(NUM_SLAVES);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Before
  public void before()
    throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    this.admin = UTIL.getAdmin();
  }

  /**
   * Tests user specifiable time stamps putting, getting and scanning. Also
   * tests same in presence of deletes. Test cores are written so can be
   * run against an HRegion and against an HTable: i.e. both local and remote.
   * <p>Port of old TestTimestamp test to here so can better utilize the spun
   * up cluster running more than a single test per spin up. Keep old tests'
   * craziness.
   */
  @Test
  public void testTimestamps() throws Exception {
    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(TimestampTestBase.FAMILY_NAME)
          .setMaxVersions(3).build())
        .build();
    this.admin.createTable(tableDescriptor);
    Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName());
    // TODO: Remove these deprecated classes or pull them in here if this is
    // the only test using them.
    TimestampTestBase.doTestDelete(table, new FlushCache() {
      @Override
      public void flushcache() throws IOException {
        UTIL.getHBaseCluster().flushcache();
      }
    });

    // Perhaps drop and re-add the table between tests so the former does
    // not pollute this latter? Or put into separate tests.
    TimestampTestBase.doTestTimestampScanning(table, new FlushCache() {
      @Override
      public void flushcache() throws IOException {
        UTIL.getMiniHBaseCluster().flushcache();
      }
    });

    table.close();
  }

  /**
   * Verifies versions across a cluster restart.
   * Port of old TestGetRowVersions test to here so can better utilize the spun
   * up cluster running more than a single test per spin up. Keep old tests'
   * craziness.
   */
  @Test
  public void testGetRowVersions() throws Exception {
    final byte[] contents = Bytes.toBytes("contents");
    final byte[] row = Bytes.toBytes("row");
    final byte[] value1 = Bytes.toBytes("value1");
    final byte[] value2 = Bytes.toBytes("value2");
    final long timestamp1 = 100L;
    final long timestamp2 = 200L;
    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(contents)
          .setMaxVersions(3).build())
        .build();
    this.admin.createTable(tableDescriptor);
    Put put = new Put(row, timestamp1);
    put.addColumn(contents, contents, value1);
    Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName());
    table.put(put);
    // Shut down and restart the HBase cluster
    table.close();
    UTIL.shutdownMiniHBaseCluster();
    LOG.debug("HBase cluster shut down -- restarting");
    StartTestingClusterOption option =
      StartTestingClusterOption.builder().numRegionServers(NUM_SLAVES).build();
    UTIL.startMiniHBaseCluster(option);
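    // Both versions should be visible after the restart: the value written at timestamp1
    // above and the one written at timestamp2 below (the family retains up to 3 versions).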
    // Make a new connection.
    table = UTIL.getConnection().getTable(tableDescriptor.getTableName());
    // Overwrite previous value
    put = new Put(row, timestamp2);
    put.addColumn(contents, contents, value2);
    table.put(put);
    // Now verify that getRow(row, column, latest) works
    Get get = new Get(row);
    // Should get one version by default
    Result r = table.get(get);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    assertTrue(r.size() == 1);
    byte[] value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
    // Now check getRow with multiple versions
    get = new Get(row);
    get.readAllVersions();
    r = table.get(get);
    assertTrue(r.size() == 2);
    value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
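    // Result.getMap() nests cells as family -> qualifier -> timestamp -> value, so both
    // versions written under the "contents" family and qualifier should appear here.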
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(contents);
    NavigableMap<Long, byte[]> versionMap = familyMap.get(contents);
    assertTrue(versionMap.size() == 2);
    assertTrue(Bytes.equals(value1, versionMap.get(timestamp1)));
    assertTrue(Bytes.equals(value2, versionMap.get(timestamp2)));
    table.close();
  }

  /**
   * Port of old TestScanMultipleVersions test here so can better utilize the
   * spun up cluster running more than just a single test. Keep old tests'
   * craziness.
   * <p>Tests five cases of scans and timestamps.
   */
  @Test
  public void testScanMultipleVersions() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();
    final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") };
    final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") };
    final long[] timestamp = new long[] { 100L, 1000L };
    this.admin.createTable(tableDescriptor, splitRows);
    Table table = UTIL.getConnection().getTable(tableName);
    // Assert we got the region layout wanted.
    Pair<byte[][], byte[][]> keys =
      UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys();
    assertEquals(2, keys.getFirst().length);
    byte[][] startKeys = keys.getFirst();
    byte[][] endKeys = keys.getSecond();

    for (int i = 0; i < startKeys.length; i++) {
      if (i == 0) {
        assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, startKeys[i]));
        assertTrue(Bytes.equals(endKeys[i], splitRows[0]));
      } else if (i == 1) {
        assertTrue(Bytes.equals(splitRows[0], startKeys[i]));
        assertTrue(Bytes.equals(endKeys[i], HConstants.EMPTY_END_ROW));
      }
    }
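
    // Insert data: one cell per row at each of the two explicit timestamps (100L and 1000L).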
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < startKeys.length; i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Put put = new Put(rows[i], timestamp[j]);
        put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
        puts.add(put);
      }
    }
    table.put(puts);
    // There are 5 cases we have to test. Each is described below.
    for (int i = 0; i < rows.length; i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Get get = new Get(rows[i]);
        get.addFamily(HConstants.CATALOG_FAMILY);
        get.setTimestamp(timestamp[j]);
        Result result = table.get(get);
        int cellCount = 0;
        for (@SuppressWarnings("unused") Cell kv : result.listCells()) {
          cellCount++;
        }
        assertTrue(cellCount == 1);
      }
    }
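
    // Each of the five scan cases below should return both rows, one from each region.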
    // Case 1: scan with LATEST_TIMESTAMP. Should get two rows.
    int count = 0;
    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    ResultScanner s = table.getScanner(scan);
    try {
      for (Result rr = null; (rr = s.next()) != null;) {
        System.out.println(rr.toString());
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 2: scan with a timestamp greater than the most recent timestamp
    // (in this case > 1000 and < LATEST_TIMESTAMP). Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimeRange(1000L, Long.MAX_VALUE);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 3: scan with timestamp equal to the most recent timestamp
    // (in this case == 1000). Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimestamp(1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 4: scan with timestamp greater than the first timestamp but less than
    // the second timestamp (100 < timestamp < 1000). Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimeRange(100L, 1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }

    // Case 5: scan with timestamp equal to the first timestamp (100).
    // Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimestamp(100L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
      while (s.next() != null) {
        count += 1;
      }
      assertEquals("Number of rows should be 2", 2, count);
    } finally {
      s.close();
    }
  }
}