/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

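/**
 * A mock {@link StoreFile} for unit tests: metadata that would normally be read
 * from an HFile (length, sequence id, entry count, time range, block distribution)
 * is instead served from canned values supplied via the constructor and setters.
 * A minimal, hypothetical usage sketch (the argument values are illustrative,
 * not from this file):
 *
 * <pre>
 * MockStoreFile sf = new MockStoreFile(testUtil, testPath, 1024, 0, false, 10);
 * sf.setEntries(100);
 * sf.setIsMajor(true);
 * </pre>
 */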
public class MockStoreFile extends StoreFile {
  long length = 0;
  boolean isRef = false;
  long ageInDisk;
  long sequenceid;
  private Map<byte[], byte[]> metadata = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
  byte[] splitPoint = null;
  TimeRangeTracker timeRangeTracker;
  long entryCount;
  boolean isMajor;
  HDFSBlocksDistribution hdfsBlocksDistribution;
  long modificationTime;

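  /**
   * Creates a mock store file whose metadata (length, age on disk, reference flag,
   * max sequence id) is fixed to the given values rather than read from a real file.
   */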
  MockStoreFile(HBaseTestingUtility testUtil, Path testPath,
      long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
    super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
        new CacheConfig(testUtil.getConfiguration()), BloomType.NONE);
    this.length = length;
    this.isRef = isRef;
    this.ageInDisk = ageInDisk;
    this.sequenceid = sequenceid;
    this.isMajor = false;
    hdfsBlocksDistribution = new HDFSBlocksDistribution();
    hdfsBlocksDistribution.addHostsAndBlockWeight(
        new String[] { RSRpcServices.getHostname(testUtil.getConfiguration(), false) }, 1);
    modificationTime = EnvironmentEdgeManager.currentTime();
  }

  void setLength(long newLen) {
    this.length = newLen;
  }

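  // splitPoint is never assigned in this mock, so the split point is always null.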
  @Override
  byte[] getFileSplitPoint(KVComparator comparator) throws IOException {
    return this.splitPoint;
  }

  @Override
  public long getMaxSequenceId() {
    return sequenceid;
  }

  @Override
  public boolean isMajorCompaction() {
    return isMajor;
  }

  public void setIsMajor(boolean isMajor) {
    this.isMajor = isMajor;
  }

  @Override
  public boolean isReference() {
    return this.isRef;
  }

  @Override
  public boolean isBulkLoadResult() {
    return false;
  }

  @Override
  public byte[] getMetadataValue(byte[] key) {
    return this.metadata.get(key);
  }

  public void setMetadataValue(byte[] key, byte[] value) {
    this.metadata.put(key, value);
  }

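  // Test hooks: tests inject a TimeRangeTracker and an entry count, which the
  // stub reader returned by getReader() reports back instead of real HFile metadata.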
  void setTimeRangeTracker(TimeRangeTracker timeRangeTracker) {
    this.timeRangeTracker = timeRangeTracker;
  }

  void setEntries(long entryCount) {
    this.entryCount = entryCount;
  }

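  // Boxed Longs so that "no TimeRangeTracker injected" can be reported as null.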
  public Long getMinimumTimestamp() {
    return (timeRangeTracker == null) ?
        null : timeRangeTracker.getMinimumTimestamp();
  }

  public Long getMaximumTimestamp() {
    return (timeRangeTracker == null) ?
        null : timeRangeTracker.getMaximumTimestamp();
  }

  @Override
  public long getModificationTimeStamp() {
    return modificationTime;
  }

  @Override
  public HDFSBlocksDistribution getHDFSBlockDistribution() {
    return hdfsBlocksDistribution;
  }

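  // Returns an anonymous stub Reader that serves the canned length, entry count
  // and maximum timestamp captured above, without ever opening a file.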
  @Override
  public StoreFile.Reader getReader() {
    final long len = this.length;
    final TimeRangeTracker timeRange = this.timeRangeTracker;
    final long entries = this.entryCount;
    return new StoreFile.Reader() {
      @Override
      public long length() {
        return len;
      }

      @Override
      public long getMaxTimestamp() {
        return timeRange == null ? Long.MAX_VALUE : timeRange.getMaximumTimestamp();
      }

      @Override
      public long getEntries() {
        return entries;
      }
    };
  }
}