
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.balancer;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.master.MockNoopMasterServices;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.ServerLocalityCostFunction;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestStochasticLoadBalancer extends BalancerTestBase {
  public static final String REGION_KEY = "testRegion";
  private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer.class);

  // Mapping of locality test -> expected locality
  private float[] expectedLocalities = {1.0f, 0.0f, 0.50f, 0.25f, 1.0f};

  /**
   * Data set for testLocalityCost:
   * [test][0][server] = number of regions hosted by that server
   * [test][region + 1][0] = server that the region is hosted on
   * [test][region + 1][server + 1] = locality (in percent) of the region's blocks on that server
   */
  private int[][][] clusterRegionLocationMocks = new int[][][]{

      // Test 1: each region is entirely local on the server that hosts it
      new int[][]{
          new int[]{2, 1, 1},
          new int[]{2, 0, 0, 100},   // region 0 is hosted on server 2 and entirely local there
          new int[]{0, 100, 0, 0},   // region 1 is hosted on server 0 and entirely local there
          new int[]{0, 100, 0, 0},   // region 2 is hosted on server 0 and entirely local there
          new int[]{1, 0, 100, 0},   // region 3 is hosted on server 1 and entirely local there
      },

      // Test 2: each region is 0% local on the server that hosts it
      new int[][]{
          new int[]{1, 2, 1},
          new int[]{0, 0, 0, 100},   // region 0 is hosted on server 0 but its blocks are all on server 2
          new int[]{1, 100, 0, 0},   // region 1 is hosted on server 1 but its blocks are all on server 0
          new int[]{1, 100, 0, 0},   // region 2 is hosted on server 1 but its blocks are all on server 0
          new int[]{2, 0, 100, 0},   // region 3 is hosted on server 2 but its blocks are all on server 1
      },

      // Test 3: each region is 25% local on the server that hosts it (and 50% locality is possible)
      new int[][]{
          new int[]{1, 2, 1},
          new int[]{0, 25, 0, 50},   // region 0 is hosted on server 0 at 25% locality; 50% is possible on server 2
          new int[]{1, 50, 25, 0},   // region 1 is hosted on server 1 at 25% locality; 50% is possible on server 0
          new int[]{1, 50, 25, 0},   // region 2 is hosted on server 1 at 25% locality; 50% is possible on server 0
          new int[]{2, 0, 50, 25},   // region 3 is hosted on server 2 at 25% locality; 50% is possible on server 1
      },

      // Test 4: each region is 25% local on the server that hosts it (and 100% locality is possible)
      new int[][]{
          new int[]{1, 2, 1},
          new int[]{0, 25, 0, 100},   // region 0 is hosted on server 0 at 25% locality; 100% is possible on server 2
          new int[]{1, 100, 25, 0},   // region 1 is hosted on server 1 at 25% locality; 100% is possible on server 0
          new int[]{1, 100, 25, 0},   // region 2 is hosted on server 1 at 25% locality; 100% is possible on server 0
          new int[]{2, 0, 100, 25},   // region 3 is hosted on server 2 at 25% locality; 100% is possible on server 1
      },

      // Test 5: each region is 75% local on the server that hosts it (and 75% is the best possible anywhere)
      new int[][]{
          new int[]{1, 2, 1},
          new int[]{0, 75, 75, 75},   // region 0 is hosted on server 0 at 75% locality
          new int[]{1, 75, 75, 75},   // region 1 is hosted on server 1 at 75% locality
          new int[]{1, 75, 75, 75},   // region 2 is hosted on server 1 at 75% locality
          new int[]{2, 75, 75, 75},   // region 3 is hosted on server 2 at 75% locality
      },
  };
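
  // How the expected values line up with the data above (my reading of the cost function under
  // test): locality appears to be measured relative to the best locality any server could offer,
  // so Test 3 (25% achieved vs. 50% achievable) expects 0.50 and Test 4 (25% vs. 100%) expects
  // 0.25, while Tests 1 and 5 already sit at the best achievable locality and expect 1.0.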

  @Test
  public void testKeepRegionLoad() throws Exception {

    ServerName sn = ServerName.valueOf("test:8080", 100);
    int numClusterStatusToAdd = 20000;
    for (int i = 0; i < numClusterStatusToAdd; i++) {
      ServerLoad sl = mock(ServerLoad.class);

      RegionLoad rl = mock(RegionLoad.class);
      when(rl.getStores()).thenReturn(i);

      Map<byte[], RegionLoad> regionLoadMap =
          new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
      regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl);
      when(sl.getRegionsLoad()).thenReturn(regionLoadMap);

      ClusterStatus clusterStatus = mock(ClusterStatus.class);
      when(clusterStatus.getServers()).thenReturn(Arrays.asList(sn));
      when(clusterStatus.getLoad(sn)).thenReturn(sl);

      loadBalancer.setClusterStatus(clusterStatus);
    }
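    // Only a bounded window of the most recent RegionLoad entries should be retained per region;
    // the assertions below expect that window to be 15, even though 20000 statuses were fed in.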
    assertNotNull(loadBalancer.loads.get(REGION_KEY));
    assertEquals(15, loadBalancer.loads.get(REGION_KEY).size());

    Queue<RegionLoad> loads = loadBalancer.loads.get(REGION_KEY);
    int i = 0;
    while (loads.size() > 0) {
      RegionLoad rl = loads.remove();
      assertEquals(i + (numClusterStatusToAdd - 15), rl.getStores());
      i++;
    }
  }

  /**
   * Test the load balancing algorithm.
   *
   * The invariant is that, after balancing, every server hosts either floor(average) or
   * ceiling(average) regions.
   *
   * @throws Exception
   */
  @Test
  public void testBalanceCluster() throws Exception {

    for (int[] mockCluster : clusterStateMocks) {
      Map<ServerName, List<HRegionInfo>> servers = mockClusterServers(mockCluster);
      List<ServerAndLoad> list = convertToList(servers);
      LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
      List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
      List<ServerAndLoad> balancedCluster = reconcile(list, plans, servers);
      LOG.info("Mock Balance : " + printMock(balancedCluster));
      assertClusterAsBalanced(balancedCluster);
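      // A second balance pass over the already-balanced cluster should produce no plans.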
      List<RegionPlan> secondPlans = loadBalancer.balanceCluster(servers);
      assertNull(secondPlans);
      for (Map.Entry<ServerName, List<HRegionInfo>> entry : servers.entrySet()) {
        returnRegions(entry.getValue());
        returnServer(entry.getKey());
      }
    }
  }

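  // Each mock cluster's locality cost should equal one minus its entry in expectedLocalities.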
  @Test
  public void testLocalityCost() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    MockNoopMasterServices master = new MockNoopMasterServices();
    StochasticLoadBalancer.CostFunction
        costFunction = new ServerLocalityCostFunction(conf, master);

    for (int test = 0; test < clusterRegionLocationMocks.length; test++) {
      int[][] clusterRegionLocations = clusterRegionLocationMocks[test];
      MockCluster cluster = new MockCluster(clusterRegionLocations);
      costFunction.init(cluster);
      double cost = costFunction.cost();
      double expected = 1 - expectedLocalities[test];
      assertEquals(expected, cost, 0.001);
    }
  }

  @Test
  public void testSkewCost() {
    Configuration conf = HBaseConfiguration.create();
    StochasticLoadBalancer.CostFunction
        costFunction = new StochasticLoadBalancer.RegionCountSkewCostFunction(conf);
    for (int[] mockCluster : clusterStateMocks) {
      costFunction.init(mockCluster(mockCluster));
      double cost = costFunction.cost();
      assertTrue(cost >= 0);
      assertTrue(cost <= 1.01);
    }

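    // Region counts that differ by at most one across servers should cost 0, while piling every
    // region onto a single server should drive the cost to its maximum of 1.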
    costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1}));
    assertEquals(0, costFunction.cost(), 0.01);
    costFunction.init(mockCluster(new int[]{0, 0, 0, 1, 1}));
    assertEquals(0, costFunction.cost(), 0.01);
    costFunction.init(mockCluster(new int[]{0, 0, 1, 1, 1}));
    assertEquals(0, costFunction.cost(), 0.01);
    costFunction.init(mockCluster(new int[]{0, 1, 1, 1, 1}));
    assertEquals(0, costFunction.cost(), 0.01);
    costFunction.init(mockCluster(new int[]{1, 1, 1, 1, 1}));
    assertEquals(0, costFunction.cost(), 0.01);
    costFunction.init(mockCluster(new int[]{10000, 0, 0, 0, 0}));
    assertEquals(1, costFunction.cost(), 0.01);
  }

  @Test
  public void testTableSkewCost() {
    Configuration conf = HBaseConfiguration.create();
    StochasticLoadBalancer.CostFunction
        costFunction = new StochasticLoadBalancer.TableSkewCostFunction(conf);
    for (int[] mockCluster : clusterStateMocks) {
      BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster);
      costFunction.init(cluster);
      double cost = costFunction.cost();
      assertTrue(cost >= 0);
      assertTrue(cost <= 1.01);
    }
  }

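  // costFromArray appears to scale the total deviation from the mean between the best possible
  // distribution (cost 0) and the worst possible one (cost 1); the three stat arrays below
  // exercise a perfectly even spread, a maximally skewed spread, and a halfway case.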
  @Test
  public void testCostFromArray() {
    Configuration conf = HBaseConfiguration.create();
    StochasticLoadBalancer.CostFromRegionLoadFunction
        costFunction = new StochasticLoadBalancer.MemstoreSizeCostFunction(conf);
    costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1}));

    double[] statOne = new double[100];
    for (int i = 0; i < 100; i++) {
      statOne[i] = 10;
    }
    assertEquals(0, costFunction.costFromArray(statOne), 0.01);

    double[] statTwo = new double[101];
    for (int i = 0; i < 100; i++) {
      statTwo[i] = 0;
    }
    statTwo[100] = 100;
    assertEquals(1, costFunction.costFromArray(statTwo), 0.01);

    double[] statThree = new double[200];
    for (int i = 0; i < 100; i++) {
      statThree[i] = 0;
      statThree[i + 100] = 100;
    }
    assertEquals(0.5, costFunction.costFromArray(statThree), 0.01);
  }

  @Test(timeout = 60000)
  public void testLosingRs() throws Exception {
    int numNodes = 3;
    int numRegions = 20;
    int numRegionsPerServer = 3; // all servers except one
    int replication = 1;
    int numTables = 2;

    Map<ServerName, List<HRegionInfo>> serverMap =
        createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
    List<ServerAndLoad> list = convertToList(serverMap);

    List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap);
    assertNotNull(plans);

    // Apply the plan to the mock cluster.
    List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap);

    assertClusterAsBalanced(balancedCluster);

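    // Grab one of the servers and register what looks like an older instance of it (same host and
    // port, lower startcode) with no regions; the test only requires that balancing still
    // completes and produces no plans.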
    ServerName sn = serverMap.keySet().toArray(new ServerName[serverMap.size()])[0];

    ServerName deadSn = ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 100);

    serverMap.put(deadSn, new ArrayList<HRegionInfo>(0));

    plans = loadBalancer.balanceCluster(serverMap);
    assertNull(plans);
  }

  @Test
  public void testReplicaCost() {
    Configuration conf = HBaseConfiguration.create();
    StochasticLoadBalancer.CostFunction
        costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf);
    for (int[] mockCluster : clusterStateMocks) {
      BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster);
      costFunction.init(cluster);
      double cost = costFunction.cost();
      assertTrue(cost >= 0);
      assertTrue(cost <= 1.01);
    }
  }

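  // Co-hosting replicas of the same region should be penalized: the comparisons below only check
  // the relative ordering of costs (heavier co-location on one server costs more), not absolute
  // values.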
  @Test
  public void testReplicaCostForReplicas() {
    Configuration conf = HBaseConfiguration.create();
    StochasticLoadBalancer.CostFunction
        costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf);

    int[] servers = new int[]{3, 3, 3, 3, 3};
    TreeMap<ServerName, List<HRegionInfo>> clusterState = mockClusterServers(servers);

    BaseLoadBalancer.Cluster cluster;

    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
    costFunction.init(cluster);
    double costWithoutReplicas = costFunction.cost();
    assertEquals(0, costWithoutReplicas, 0);

    // replicate a region from the first server onto the last server
    HRegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(
      clusterState.firstEntry().getValue().get(0), 1);
    clusterState.lastEntry().getValue().add(replica1);

    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
    costFunction.init(cluster);
    double costWith1ReplicaDifferentServer = costFunction.cost();

    assertEquals(0, costWith1ReplicaDifferentServer, 0);

    // add a third replica (replica id 2) of the same region to the last server,
    // co-hosting it with replica1
    HRegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(replica1, 2);
    clusterState.lastEntry().getValue().add(replica2);

    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
    costFunction.init(cluster);
    double costWith1ReplicaSameServer = costFunction.cost();

    assertTrue(costWith1ReplicaDifferentServer < costWith1ReplicaSameServer);

    // test with replication = 4 for the following scenarios:

    HRegionInfo replica3;
    Iterator<Entry<ServerName, List<HRegionInfo>>> it;
    Entry<ServerName, List<HRegionInfo>> entry;

    clusterState = mockClusterServers(servers);
    it = clusterState.entrySet().iterator();
    entry = it.next(); // first server
    HRegionInfo hri = entry.getValue().get(0);
    replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1);
    replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2);
    replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3);
    entry.getValue().add(replica1);
    entry.getValue().add(replica2);
    it.next().getValue().add(replica3); // second server

    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
    costFunction.init(cluster);
    double costWith3ReplicasSameServer = costFunction.cost();

    clusterState = mockClusterServers(servers);
    hri = clusterState.firstEntry().getValue().get(0);
    replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1);
    replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2);
    replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3);

    clusterState.firstEntry().getValue().add(replica1);
    clusterState.lastEntry().getValue().add(replica2);
    clusterState.lastEntry().getValue().add(replica3);

    cluster = new BaseLoadBalancer.Cluster(clusterState, null, null, null);
    costFunction.init(cluster);
    double costWith2ReplicasOnTwoServers = costFunction.cost();

    assertTrue(costWith2ReplicasOnTwoServers < costWith3ReplicasSameServer);
  }

  @Test
  public void testNeedsBalanceForColocatedReplicas() {
    // check the case where there are two hosts in one rack and both replicas of a region
    // end up hosted on the same server
    List<HRegionInfo> regions = randomRegions(1);
    ServerName s1 = ServerName.valueOf("host1", 1000, 11111);
    ServerName s2 = ServerName.valueOf("host11", 1000, 11111);
    Map<ServerName, List<HRegionInfo>> map = new HashMap<ServerName, List<HRegionInfo>>();
    map.put(s1, regions);
    regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
    // after the step above, s1 holds two replicas of the same region
    regions = randomRegions(1);
    map.put(s2, regions);
    assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null, null)));
    // check the case where there are two racks, two hosts share a rack, and both replicas
    // of a region are placed on that same rack
    map.clear();
    regions = randomRegions(1);
    List<HRegionInfo> regionsOnS2 = new ArrayList<HRegionInfo>(1);
    regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
    map.put(s1, regions);
    map.put(s2, regionsOnS2);
    // add another server so that the cluster has a host on a different rack
    map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1));
    assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null,
        new ForTestRackManagerOne())));
  }

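  // The cluster-size tests below all delegate to testWithCluster(...) from BalancerTestBase with
  // different cluster shapes. The two trailing booleans appear to control whether the resulting
  // assignment is asserted to be fully balanced and, when replication > 1, whether replica
  // placement is asserted as well.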
  @Test (timeout = 60000)
  public void testSmallCluster() {
    int numNodes = 10;
    int numRegions = 1000;
    int numRegionsPerServer = 40; // all servers except one
    int replication = 1;
    int numTables = 10;
    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
  }

  @Test (timeout = 60000)
  public void testSmallCluster2() {
    int numNodes = 20;
    int numRegions = 2000;
    int numRegionsPerServer = 40; // all servers except one
    int replication = 1;
    int numTables = 10;
    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
  }

  @Test (timeout = 60000)
  public void testSmallCluster3() {
    int numNodes = 20;
    int numRegions = 2000;
    int numRegionsPerServer = 1; // all servers except one
    int replication = 1;
    int numTables = 10;
    /* fails because of max moves */
    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, false);
  }

  @Test (timeout = 800000)
  public void testMidCluster() {
    int numNodes = 100;
    int numRegions = 10000;
    int numRegionsPerServer = 60; // all servers except one
    int replication = 1;
    int numTables = 40;
    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
  }

  @Test (timeout = 800000)
  public void testMidCluster2() {
    int numNodes = 200;
    int numRegions = 100000;
    int numRegionsPerServer = 40; // all servers except one
    int replication = 1;
    int numTables = 400;
    testWithCluster(numNodes,
        numRegions,
        numRegionsPerServer,
        replication,
        numTables,
        false, /* the large number of regions means we may not always reach the best balance in one run */
        false);
  }

  @Test (timeout = 800000)
  public void testMidCluster3() {
    int numNodes = 100;
    int numRegions = 2000;
    int numRegionsPerServer = 9; // all servers except one
    int replication = 1;
    int numTables = 110;
    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
    // TODO(eclark): Make sure that the tables are well distributed.
  }

  @Test
  public void testLargeCluster() {
    int numNodes = 1000;
    int numRegions = 100000; // 100 regions per RS
    int numRegionsPerServer = 80; // all servers except one
    int numTables = 100;
    int replication = 1;
    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
  }

  @Test (timeout = 800000)
  public void testRegionReplicasOnSmallCluster() {
    int numNodes = 10;
    int numRegions = 1000;
    int replication = 3; // 3 replicas per region
    int numRegionsPerServer = 80; // all regions are mostly balanced
    int numTables = 10;
    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
  }

  @Test (timeout = 800000)
  public void testRegionReplicationOnMidClusterSameHosts() {
    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec
    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
    loadBalancer.setConf(conf);
    int numHosts = 100;
    int numRegions = 100 * 100;
    int replication = 3; // 3 replicas per region
    int numRegionsPerServer = 5;
    int numTables = 10;
    Map<ServerName, List<HRegionInfo>> serverMap =
        createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables);
    int numNodesPerHost = 4;

    // create a new map with 4 RS per host.
    Map<ServerName, List<HRegionInfo>> newServerMap =
        new TreeMap<ServerName, List<HRegionInfo>>(serverMap);
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
      for (int i = 1; i < numNodesPerHost; i++) {
        ServerName s1 = entry.getKey();
        // create an RS for the same host
        ServerName s2 = ServerName.valueOf(s1.getHostname(), s1.getPort() + i, 1);
        newServerMap.put(s2, new ArrayList<HRegionInfo>());
      }
    }

    testWithCluster(newServerMap, null, true, true);
  }

  private static class ForTestRackManager extends RackManager {
    int numRacks;

    public ForTestRackManager(int numRacks) {
      this.numRacks = numRacks;
    }

    @Override
    public String getRack(ServerName server) {
      return "rack_" + (server.hashCode() % numRacks);
    }
  }

  private static class ForTestRackManagerOne extends RackManager {
    @Override
    public String getRack(ServerName server) {
      return server.getHostname().endsWith("1") ? "rack1" : "rack2";
    }
  }

  @Test (timeout = 800000)
  public void testRegionReplicationOnMidClusterWithRacks() {
    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L);
    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec
    loadBalancer.setConf(conf);
    int numNodes = 30;
    int numRegions = numNodes * 30;
    int replication = 3; // 3 replicas per region
    int numRegionsPerServer = 28;
    int numTables = 10;
    int numRacks = 4; // all replicas should be on a different rack
    Map<ServerName, List<HRegionInfo>> serverMap =
        createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
    RackManager rm = new ForTestRackManager(numRacks);

    testWithCluster(serverMap, rm, false, true);
  }

  // This mock allows us to test the LocalityCostFunction.
  private class MockCluster extends BaseLoadBalancer.Cluster {

    private int[][] localities = null;   // [region][server] = percentage of the region's blocks on that server

    public MockCluster(int[][] regions) {

      // regions[0] is an array where index = server index and value = number of regions hosted there
      super(mockClusterServers(regions[0], 1), null, null, null);

      localities = new int[regions.length - 1][];
      for (int i = 1; i < regions.length; i++) {
        int regionIndex = i - 1;
        localities[regionIndex] = new int[regions[i].length - 1];
        regionIndexToServerIndex[regionIndex] = regions[i][0];
        for (int j = 1; j < regions[i].length; j++) {
          int serverIndex = j - 1;
          localities[regionIndex][serverIndex] =
              regions[i][j] > 100 ? regions[i][j] % 100 : regions[i][j];
        }
      }
    }

    @Override
    float getLocalityOfRegion(int region, int server) {
      // convert the locality percentage to a fraction
      return localities[region][server] / 100.0f;
    }

    @Override
    public int getRegionSizeMB(int region) {
      return 1;
    }
  }

}