/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tests {@link HFile} cache-on-write functionality for data blocks, non-root index blocks, and
 * Bloom filter blocks, as specified by the column family.
 */
@RunWith(Parameterized.class)
@Category({ RegionServerTests.class, SmallTests.class })
public class TestCacheOnWriteInSchema {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCacheOnWriteInSchema.class);
  private static final Logger LOG = LoggerFactory.getLogger(TestCacheOnWriteInSchema.class);

  @Rule
  public TestName name = new TestName();

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final String DIR =
    TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString();
  private static byte[] table;
  private static byte[] family = Bytes.toBytes("family");
  private static final int NUM_KV = 25000;
  private static final Random rand = new Random(12983177L);
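  // Note: KeyValue.Type.values() includes the Minimum and Maximum pseudo-types, which are used
  // only as seek sentinels and never written to a store file, hence the "- 2" below.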
  /** The number of valid key types possible in a store file */
  private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2;
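  // Each CacheOnWriteType turns on exactly one cache-on-write flag in the column family schema
  // and records which block types that flag is expected to cache.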
  private static enum CacheOnWriteType {
    DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK),
    INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

    private final BlockType blockType1;
    private final BlockType blockType2;

    private CacheOnWriteType(BlockType blockType) {
      this(blockType, blockType);
    }

    private CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
      this.blockType1 = blockType1;
      this.blockType2 = blockType2;
    }

    public boolean shouldBeCached(BlockType blockType) {
      return blockType == blockType1 || blockType == blockType2;
    }

    public ColumnFamilyDescriptorBuilder
      modifyFamilySchema(ColumnFamilyDescriptorBuilder builder) {
      switch (this) {
        case DATA_BLOCKS:
          builder.setCacheDataOnWrite(true);
          break;
        case BLOOM_BLOCKS:
          builder.setCacheBloomsOnWrite(true);
          break;
        case INDEX_BLOCKS:
          builder.setCacheIndexesOnWrite(true);
          break;
      }
      return builder;
    }
  }
  private final CacheOnWriteType cowType;
  private Configuration conf;
  private final String testDescription;
  private HRegion region;
  private HStore store;
  private WALFactory walFactory;
  private FileSystem fs;

  public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
    this.cowType = cowType;
    testDescription = "[cacheOnWrite=" + cowType + "]";
    System.out.println(testDescription);
  }
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> cowTypes = new ArrayList<>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
    return cowTypes;
  }
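  // Runs before each test method: builds a fresh region and HStore whose column family carries
  // the single cache-on-write flag under test.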
  @Before
  public void setUp() throws IOException {
    // Parameterized tests append a [#] suffix to the method name; replace the [ and ]
    // characters so the result is a valid table name.
    table = Bytes.toBytes(name.getMethodName().replaceAll("[\\[\\]]", "_"));

    conf = TEST_UTIL.getConfiguration();
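    // Disable the global cache-on-write switches so that any caching observed below can only
    // have been triggered by the column family schema.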
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
    fs = HFileSystem.get(conf);
    // Create the schema
    ColumnFamilyDescriptor hcd = cowType
      .modifyFamilySchema(
        ColumnFamilyDescriptorBuilder.newBuilder(family).setBloomFilterType(BloomType.ROWCOL))
      .build();
    TableDescriptor htd =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setColumnFamily(hcd).build();
    // Create a store based on the schema
    String id = TestCacheOnWriteInSchema.class.getName();
    Path logdir =
      new Path(CommonFSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id));
    fs.delete(logdir, true);

    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    walFactory = new WALFactory(conf, id);
    region = TEST_UTIL.createLocalHRegion(info, conf, htd, walFactory.getWAL(info));
    region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
    store = new HStore(region, hcd, conf, false);
  }
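  // Tear down in reverse order of creation, remembering the last failure so that a cleanup error
  // is reported rather than silently swallowed.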
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      walFactory.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      fs.delete(new Path(DIR), true);
    } catch (IOException e) {
      LOG.error("Could not delete " + DIR, e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }
  @Test
  public void testCacheOnWriteInSchema() throws IOException {
    // Write some random data into the store
    StoreFileWriter writer = store.getStoreEngine()
      .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(Integer.MAX_VALUE)
        .compression(HFile.DEFAULT_COMPRESSION_ALGORITHM).isCompaction(false)
        .includeMVCCReadpoint(true).includesTag(false).shouldDropBehind(false));
    writeStoreFile(writer);
    // Verify the block types of interest were cached on write
    readStoreFile(writer.getPath());
  }
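  // Walk every block in the finished store file and cross-check the block cache: block types
  // selected by the schema flag must be present, all other types must be absent.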
  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache().get();
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
    sf.initReader();
    HFile.Reader reader = sf.getReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled
      HFileScanner scanner = reader.getScanner(conf, false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
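      // Blocks written before the load-on-open section (data, leaf-index, and Bloom-chunk
      // blocks) are laid out back to back, so we can step through them by on-disk size.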
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null,
          DataBlockEncoding.NONE);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        final BlockType blockType = block.getBlockType();

        if (
          shouldBeCached != isCached
            && (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))
        ) {
          throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: "
            + isCached + "\n" + "Test description: " + testDescription + "\n" + "block: " + block
            + "\n" + "blockCacheKey: " + blockCacheKey);
        }
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }
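  // Pick a type for a generated KeyValue: half the time a Put, otherwise a uniformly random
  // valid type (which may again be Put), never the Minimum/Maximum sentinels.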
  private static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType + ". "
          + "Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }
  private void writeStoreFile(StoreFileWriter writer) throws IOException {
    final int rowLen = 32;
    for (int i = 0; i < NUM_KV; ++i) {
      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
      byte[] v = RandomKeyValueUtil.randomValue(rand);
      int cfLen = rand.nextInt(k.length - rowLen + 1);
      KeyValue kv = new KeyValue(
        k, 0, rowLen,
        k, rowLen, cfLen,
        k, rowLen + cfLen, k.length - rowLen - cfLen,
        rand.nextLong(),
        generateKeyType(rand),
        v, 0, v.length);
      writer.append(kv);
    }
    writer.close();
  }
}