/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ConcurrentSkipListSet;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.FastLongHistogram;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;

/**
 * Utility for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
 * No attempt has been made at making this thread safe.
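 * <p>
 * A minimal usage sketch (illustrative only; {@code cache} is assumed to be a {@link BlockCache}
 * obtained from the running server, and the Configuration is created here just for the example):
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * BlockCacheUtil.CachedBlocksByFile cbsbf =
 *     BlockCacheUtil.getLoadedCachedBlocksByFile(conf, cache);
 * String json = BlockCacheUtil.toJSON(cbsbf); // may throw IOException
 * String summary = cbsbf.toString();          // counts, sizes, and age percentiles
 * }</pre>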
 */
@InterfaceAudience.Private
public class BlockCacheUtil {
  /**
   * Needed for generating JSON.
   */
  private static final ObjectMapper MAPPER = new ObjectMapper();
  static {
    MAPPER.configure(SerializationConfig.Feature.FAIL_ON_EMPTY_BEANS, false);
    MAPPER.configure(SerializationConfig.Feature.FLUSH_AFTER_WRITE_VALUE, true);
    MAPPER.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
  }

  /**
   * @param cb the cached block to describe
   * @param now the time to use when computing the block's age; compared against
   *   {@link CachedBlock#getCachedTime()}
   * @return The block content as a String.
   */
  public static String toString(final CachedBlock cb, final long now) {
    return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb, now);
  }

  /**
   * Little data structure to hold counts for a file.
   * Used when doing a toJSON.
   */
  static class CachedBlockCountsPerFile {
    private int count = 0;
    private long size = 0;
    private int countData = 0;
    private long sizeData = 0;
    private final String filename;

    CachedBlockCountsPerFile(final String filename) {
      this.filename = filename;
    }

    public int getCount() {
      return count;
    }

    public long getSize() {
      return size;
    }

    public int getCountData() {
      return countData;
    }

    public long getSizeData() {
      return sizeData;
    }

    public String getFilename() {
      return filename;
    }
  }

  /**
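   * Produce a JSON rendering of the counts for the blocks cached for a single file.
   * <p>
   * A minimal sketch (illustrative only; {@code cbsbf} is assumed to come from
   * {@link #getLoadedCachedBlocksByFile(Configuration, BlockCache)}):
   * <pre>{@code
   * for (Map.Entry<String, NavigableSet<CachedBlock>> entry :
   *     cbsbf.getCachedBlockStatsByFile().entrySet()) {
   *   String json = BlockCacheUtil.toJSON(entry.getKey(), entry.getValue());
   * }
   * }</pre>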
   * @param filename the name of the file whose blocks are being counted
   * @param blocks the cached blocks belonging to <code>filename</code>
   * @return A JSON String of <code>filename</code> and counts of <code>blocks</code>
   * @throws JsonGenerationException
   * @throws JsonMappingException
   * @throws IOException
   */
  public static String toJSON(final String filename, final NavigableSet<CachedBlock> blocks)
  throws JsonGenerationException, JsonMappingException, IOException {
    CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename);
    for (CachedBlock cb: blocks) {
      counts.count++;
      counts.size += cb.getSize();
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        counts.countData++;
        counts.sizeData += cb.getSize();
      }
    }
    return MAPPER.writeValueAsString(counts);
  }

  /**
   * @param cbsbf the aggregated, per-file cached-block counts
   * @return JSON string of <code>cbsbf</code> aggregated
   * @throws JsonGenerationException
   * @throws JsonMappingException
   * @throws IOException
   */
  public static String toJSON(final CachedBlocksByFile cbsbf)
  throws JsonGenerationException, JsonMappingException, IOException {
    return MAPPER.writeValueAsString(cbsbf);
  }

  /**
   * @param bc the block cache to render
   * @return JSON string of <code>bc</code> content.
   * @throws JsonGenerationException
   * @throws JsonMappingException
   * @throws IOException
   */
  public static String toJSON(final BlockCache bc)
  throws JsonGenerationException, JsonMappingException, IOException {
    return MAPPER.writeValueAsString(bc);
  }

  /**
   * @param cb the cached block to describe
   * @param now the time to use when computing the block's age; compared against
   *   {@link CachedBlock#getCachedTime()}
   * @return The block content of <code>cb</code> as a String minus the filename.
   */
  public static String toStringMinusFileName(final CachedBlock cb, final long now) {
    return "offset=" + cb.getOffset() +
      ", size=" + cb.getSize() +
      ", age=" + (now - cb.getCachedTime()) +
      ", type=" + cb.getBlockType() +
      ", priority=" + cb.getBlockPriority();
  }

  /**
   * Get a {@link CachedBlocksByFile} instance and load it up by iterating content in
   * {@link BlockCache}.
   * @param conf Used to read configurations
   * @param bc Block Cache to iterate.
   * @return Loaded up instance of CachedBlocksByFile
   */
  public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf,
      final BlockCache bc) {
    CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf);
    for (CachedBlock cb: bc) {
      if (cbsbf.update(cb)) break;
    }
    return cbsbf;
  }

  /**
   * Use one of these to keep a running account of cached blocks by file.  Throw it away when done.
   * This is different from metrics in that it is stats on the current state of a cache.
   * See {@link BlockCacheUtil#getLoadedCachedBlocksByFile(Configuration, BlockCache)}.
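   * <p>
   * A minimal sketch of manual use (illustrative only; {@code cache} is assumed to be an existing
   * {@link BlockCache} and {@code conf} a caller-supplied Configuration):
   * <pre>{@code
   * CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf);
   * for (CachedBlock cb : cache) {
   *   if (cbsbf.update(cb)) {
   *     break; // hit the hbase.ui.blockcache.by.file.max cap; stop adding
   *   }
   * }
   * long dataBytes = cbsbf.getDataSize();
   * AgeSnapshot ages = cbsbf.getAgeInCacheSnapshot();
   * }</pre>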
   */
  @JsonIgnoreProperties({"cachedBlockStatsByFile"})
  public static class CachedBlocksByFile {
    private int count;
    private int dataBlockCount;
    private long size;
    private long dataSize;
    private final long now = System.nanoTime();
    private final int max;
    public static final int DEFAULT_MAX = 100000;

    CachedBlocksByFile() {
      this(null);
    }

    CachedBlocksByFile(final Configuration c) {
      this.max = c == null? DEFAULT_MAX:
        c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
    }
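
    // Example (sketch): a caller that wants more files reported could raise the cap before
    // building this object; "conf" here is an assumed, caller-supplied Configuration:
    //   conf.setInt("hbase.ui.blockcache.by.file.max", 500000);
    //   CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf);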

    /**
     * Map by filename. Use concurrent utils because we want our Map and contained blocks sorted.
     */
    private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
      new ConcurrentSkipListMap<String, NavigableSet<CachedBlock>>();
    FastLongHistogram hist = new FastLongHistogram();

    /**
     * @param cb the cached block to add to the running per-file counts
     * @return True if full; i.e. we will not be adding any more.
     */
    public boolean update(final CachedBlock cb) {
      if (isFull()) return true;
      NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
      if (set == null) {
        set = new ConcurrentSkipListSet<CachedBlock>();
        this.cachedBlockByFile.put(cb.getFilename(), set);
      }
      set.add(cb);
      this.size += cb.getSize();
      this.count++;
      BlockType bt = cb.getBlockType();
      if (bt != null && bt.isData()) {
        this.dataBlockCount++;
        this.dataSize += cb.getSize();
      }
      long age = this.now - cb.getCachedTime();
      this.hist.add(age, 1);
      return false;
    }

    /**
     * @return True if full; i.e. we have loaded up to the maximum set in configuration
     * <code>hbase.ui.blockcache.by.file.max</code> (Default: DEFAULT_MAX), so any further
     * items in the cache will not be counted.
     */
    public boolean isFull() {
      return this.count >= this.max;
    }

    public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
      return this.cachedBlockByFile;
    }

    /**
     * @return count of blocks in the cache
     */
    public int getCount() {
      return count;
    }

    public int getDataCount() {
      return dataBlockCount;
    }

    /**
     * @return size of blocks in the cache
     */
    public long getSize() {
      return size;
    }

    /**
     * @return Size of data.
     */
    public long getDataSize() {
      return dataSize;
    }

    public AgeSnapshot getAgeInCacheSnapshot() {
      return new AgeSnapshot(this.hist);
    }

    @Override
    public String toString() {
      AgeSnapshot snapshot = getAgeInCacheSnapshot();
      return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size +
          ", dataSize=" + getDataSize() +
          ", mean age=" + snapshot.getMean() +
          ", min age=" + snapshot.getMin() +
          ", max age=" + snapshot.getMax() +
          ", 75th percentile age="   + snapshot.get75thPercentile() +
          ", 95th percentile age="   + snapshot.get95thPercentile() +
          ", 98th percentile age="   + snapshot.get98thPercentile() +
          ", 99th percentile age="   + snapshot.get99thPercentile() +
          ", 99.9th percentile age=" + snapshot.get999thPercentile();
    }
  }
}