
1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.Closeable;
22  import java.io.IOException;
23  import java.io.InterruptedIOException;
24  import java.net.SocketTimeoutException;
25  import java.util.ArrayList;
26  import java.util.Arrays;
27  import java.util.HashMap;
28  import java.util.LinkedList;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.Map.Entry;
32  import java.util.concurrent.atomic.AtomicInteger;
33  import java.util.concurrent.atomic.AtomicReference;
34  import java.util.concurrent.ExecutionException;
35  import java.util.concurrent.Future;
36  import java.util.concurrent.TimeUnit;
37  import java.util.concurrent.TimeoutException;
38  import java.util.regex.Pattern;
39  
40  import org.apache.commons.logging.Log;
41  import org.apache.commons.logging.LogFactory;
42  import org.apache.hadoop.conf.Configuration;
43  import org.apache.hadoop.hbase.Abortable;
44  import org.apache.hadoop.hbase.ClusterStatus;
45  import org.apache.hadoop.hbase.DoNotRetryIOException;
46  import org.apache.hadoop.hbase.HBaseConfiguration;
47  import org.apache.hadoop.hbase.HColumnDescriptor;
48  import org.apache.hadoop.hbase.HConstants;
49  import org.apache.hadoop.hbase.HRegionInfo;
50  import org.apache.hadoop.hbase.HRegionLocation;
51  import org.apache.hadoop.hbase.HTableDescriptor;
52  import org.apache.hadoop.hbase.MasterNotRunningException;
53  import org.apache.hadoop.hbase.MetaTableAccessor;
54  import org.apache.hadoop.hbase.NamespaceDescriptor;
55  import org.apache.hadoop.hbase.NotServingRegionException;
56  import org.apache.hadoop.hbase.ProcedureInfo;
57  import org.apache.hadoop.hbase.RegionException;
58  import org.apache.hadoop.hbase.RegionLocations;
59  import org.apache.hadoop.hbase.ServerName;
60  import org.apache.hadoop.hbase.TableExistsException;
61  import org.apache.hadoop.hbase.TableName;
62  import org.apache.hadoop.hbase.TableNotDisabledException;
63  import org.apache.hadoop.hbase.TableNotEnabledException;
64  import org.apache.hadoop.hbase.TableNotFoundException;
65  import org.apache.hadoop.hbase.UnknownRegionException;
66  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
67  import org.apache.hadoop.hbase.backup.BackupRequest;
68  import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
69  import org.apache.hadoop.hbase.classification.InterfaceAudience;
70  import org.apache.hadoop.hbase.classification.InterfaceStability;
71  import org.apache.hadoop.hbase.client.ConnectionManager.HConnectionImplementation;
72  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
73  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
74  import org.apache.hadoop.hbase.exceptions.DeserializationException;
75  import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
76  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
77  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
78  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
79  import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
80  import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
81  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
82  import org.apache.hadoop.hbase.protobuf.RequestConverter;
83  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
84  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
85  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
86  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
87  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
88  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
89  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
90  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
91  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
92  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
93  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
94  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
95  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest;
96  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse;
97  import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
98  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
99  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
100 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
101 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
102 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
103 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
131 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
132 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
133 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
134 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
135 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
136 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
137 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
138 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
139 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
140 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
141 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
142 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
143 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest;
144 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
145 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
146 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
147 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
148 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
149 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
150 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
151 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
152 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
153 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
154 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
155 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
156 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
157 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
158 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
159 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
160 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
161 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
162 import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
163 import org.apache.hadoop.hbase.quotas.QuotaFilter;
164 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
165 import org.apache.hadoop.hbase.quotas.QuotaSettings;
166 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
167 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
168 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
169 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
170 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
171 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
172 import org.apache.hadoop.hbase.util.Addressing;
173 import org.apache.hadoop.hbase.util.Bytes;
174 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
175 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
176 import org.apache.hadoop.hbase.util.Pair;
177 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
178 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
179 import org.apache.hadoop.ipc.RemoteException;
180 import org.apache.hadoop.util.StringUtils;
181 import org.apache.zookeeper.KeeperException;
182 
183 import com.google.common.annotations.VisibleForTesting;
184 import com.google.protobuf.ByteString;
185 import com.google.protobuf.ServiceException;
186 
187 /**
188  * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
189  * this is an HBase-internal class as defined in
190  * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
191  * There are no guarantees for backwards source / binary compatibility, and methods or the
192  * class itself can change or go away without deprecation.
193  * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
194  * an HBaseAdmin directly.
195  *
196  * <p>Connection should be an <i>unmanaged</i> connection obtained via
197  * {@link ConnectionFactory#createConnection(Configuration)}
198  *
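 * <p>A minimal, illustrative sketch of the recommended way to obtain an {@link Admin}
 * instance (assumes an existing {@code Configuration} named {@code conf}; shown for
 * illustration only):
 * <pre>{@code
 * try (Connection connection = ConnectionFactory.createConnection(conf);
 *      Admin admin = connection.getAdmin()) {
 *   // admin operations go here
 * }
 * }</pre>
 *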
199  * @see ConnectionFactory
200  * @see Connection
201  * @see Admin
202  */
203 @InterfaceAudience.Private
204 @InterfaceStability.Evolving
205 public class HBaseAdmin implements Admin {
206   private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
207 
208   private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
209 
210   private ClusterConnection connection;
211 
212   private volatile Configuration conf;
213   private final long pause;
214   private final int numRetries;
215   // Some operations can take a long time, such as disabling a big table.
216   // numRetries is for 'normal' stuff... Multiply by this factor when you
217   // want to wait a long time.
218   private final int retryLongerMultiplier;
219   private final int syncWaitTimeout;
220   private final long backupWaitTimeout;
221   private boolean aborted;
222   private boolean cleanupConnectionOnClose = false; // close the connection in close()
223   private boolean closed = false;
224   private int operationTimeout;
225   private int rpcTimeout;
226 
227   private RpcRetryingCallerFactory rpcCallerFactory;
228   private RpcControllerFactory rpcControllerFactory;
229 
230   private NonceGenerator ng;
231 
232   /**
233    * Constructor.
234    * See {@link #HBaseAdmin(Connection)}
235    *
236    * @param c Configuration object. Copied internally.
237    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
238    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
239    */
240   @Deprecated
241   public HBaseAdmin(Configuration c)
242   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
243     // Will not leak connections, as the new implementation of the constructor
244     // does not throw exceptions anymore.
245     this(ConnectionManager.getConnectionInternal(new Configuration(c)));
246     this.cleanupConnectionOnClose = true;
247   }
248 
249   @Override
250   public int getOperationTimeout() {
251     return operationTimeout;
252   }
253 
254 
255   /**
256    * Constructor for externally managed Connections.
257    * The connection to master will be created when required by admin functions.
258    *
259    * @param connection The Connection instance to use
260    * @throws MasterNotRunningException, ZooKeeperConnectionException are no longer
261    *  thrown, but are kept in the signature for backward API compatibility
262    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
263    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
264    */
265   @Deprecated
266   public HBaseAdmin(Connection connection)
267       throws MasterNotRunningException, ZooKeeperConnectionException {
268     this((ClusterConnection)connection);
269   }
270 
271   HBaseAdmin(ClusterConnection connection) {
272     this.conf = connection.getConfiguration();
273     this.connection = connection;
274 
275     // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
276     this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
277         HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
278     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
279         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
280     this.retryLongerMultiplier = this.conf.getInt(
281         "hbase.client.retries.longer.multiplier", 10);
282     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
283         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
284     this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
285         HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
286     this.syncWaitTimeout = this.conf.getInt(
287       "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
288     this.backupWaitTimeout = this.conf.getInt(
289       "hbase.client.backup.wait.timeout.sec", 24 * 3600); // 24 h
290     this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
291     this.rpcControllerFactory = connection.getRpcControllerFactory();
292 
293     this.ng = this.connection.getNonceGenerator();
294   }
295 
296   @Override
297   public void abort(String why, Throwable e) {
298     // Currently just records the abort and rethrows the passed message and exception
299     this.aborted = true;
300     throw new RuntimeException(why, e);
301   }
302 
303   @Override
304   public boolean isAborted(){
305     return this.aborted;
306   }
307 
308   /**
309    * Abort a procedure
310    * @param procId ID of the procedure to abort
311    * @param mayInterruptIfRunning whether the procedure should still be aborted if it has already completed at least one step
312    * @return true if aborted, false if procedure already completed or does not exist
313    * @throws IOException
314    */
315   @Override
316   public boolean abortProcedure(
317       final long procId,
318       final boolean mayInterruptIfRunning) throws IOException {
319     Future<Boolean> future = abortProcedureAsync(procId, mayInterruptIfRunning);
320     try {
321       return future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
322     } catch (InterruptedException e) {
323       throw new InterruptedIOException("Interrupted when waiting for procedure to be cancelled");
324     } catch (TimeoutException e) {
325       throw new TimeoutIOException(e);
326     } catch (ExecutionException e) {
327       if (e.getCause() instanceof IOException) {
328         throw (IOException)e.getCause();
329       } else {
330         throw new IOException(e.getCause());
331       }
332     }
333   }
334 
335   /**
336    * Aborts a procedure but does not block or wait for it to be completely removed.
337    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
338    * It may throw ExecutionException if there was an error while executing the operation
339    * or TimeoutException in case the wait timeout was not long enough to allow the
340    * operation to complete.
341    *
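   * <p>Illustrative sketch of waiting on the returned future; the procedure id and the
   * three second timeout are made-up values, and {@code admin} is assumed to exist:
   * <pre>{@code
   * Future<Boolean> future = admin.abortProcedureAsync(procId, true);
   * boolean aborted = future.get(3, TimeUnit.SECONDS);
   * }</pre>
   *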
342    * @param procId ID of the procedure to abort
343    * @param mayInterruptIfRunning whether the procedure should still be aborted if it has already completed at least one step
344    * @return true if aborted, false if procedure already completed or does not exist
345    * @throws IOException
346    */
347   @Override
348   public Future<Boolean> abortProcedureAsync(
349       final long procId,
350       final boolean mayInterruptIfRunning) throws IOException {
351     Boolean abortProcResponse = executeCallable(
352       new MasterCallable<AbortProcedureResponse>(getConnection()) {
353         @Override
354         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
355           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
356           controller.setCallTimeout(callTimeout);
357           AbortProcedureRequest abortProcRequest =
358               AbortProcedureRequest.newBuilder().setProcId(procId).build();
359           return master.abortProcedure(controller, abortProcRequest);
360         }
361       }).getIsProcedureAborted();
362 
363     AbortProcedureFuture abortProcFuture =
364         new AbortProcedureFuture(this, procId, abortProcResponse);
365     return abortProcFuture;
366   }
367 
368   private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
369     private boolean isAbortInProgress;
370 
371     public AbortProcedureFuture(
372         final HBaseAdmin admin,
373         final Long procId,
374         final Boolean abortProcResponse) {
375       super(admin, procId);
376       this.isAbortInProgress = abortProcResponse;
377     }
378 
379     @Override
380     public Boolean get(long timeout, TimeUnit unit)
381         throws InterruptedException, ExecutionException, TimeoutException {
382       if (!this.isAbortInProgress) {
383         return false;
384       }
385       super.get(timeout, unit);
386       return true;
387     }
388   }
389 
390   /** @return HConnection used by this object. */
391   @Override
392   public HConnection getConnection() {
393     return connection;
394   }
395 
396   /** @return - true if the master server is running. Throws an exception
397    *  otherwise.
398    * @throws ZooKeeperConnectionException
399    * @throws MasterNotRunningException
400    * @deprecated this has been deprecated without a replacement
401    */
402   @Deprecated
403   public boolean isMasterRunning()
404   throws MasterNotRunningException, ZooKeeperConnectionException {
405     return connection.isMasterRunning();
406   }
407 
408   /**
409    * @param tableName Table to check.
410    * @return True if table exists already.
411    * @throws IOException
412    */
413   @Override
414   public boolean tableExists(final TableName tableName) throws IOException {
415     return MetaTableAccessor.tableExists(connection, tableName);
416   }
417 
418   public boolean tableExists(final byte[] tableName)
419   throws IOException {
420     return tableExists(TableName.valueOf(tableName));
421   }
422 
423   public boolean tableExists(final String tableName)
424   throws IOException {
425     return tableExists(TableName.valueOf(tableName));
426   }
427 
428   @Override
429   public HTableDescriptor[] listTables() throws IOException {
430     return listTables((Pattern)null, false);
431   }
432 
433   @Override
434   public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
435     return listTables(pattern, false);
436   }
437 
438   @Override
439   public HTableDescriptor[] listTables(String regex) throws IOException {
440     return listTables(Pattern.compile(regex), false);
441   }
442 
443   @Override
444   public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
445       throws IOException {
446     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
447       @Override
448       public HTableDescriptor[] call(int callTimeout) throws ServiceException {
449         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
450         controller.setCallTimeout(callTimeout);
451         GetTableDescriptorsRequest req =
452             RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
453         return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
454       }
455     });
456   }
457 
458   @Override
459   public HTableDescriptor[] listTables(String regex, boolean includeSysTables)
460       throws IOException {
461     return listTables(Pattern.compile(regex), includeSysTables);
462   }
463 
464   /**
465    * List all of the names of userspace tables.
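   * <p>Illustrative sketch of the non-deprecated replacement (assumes an existing
   * {@code Admin} instance named {@code admin}):
   * <pre>{@code
   * for (TableName tn : admin.listTableNames()) {
   *   System.out.println(tn.getNameAsString());
   * }
   * }</pre>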
466    * @return String[] table names
467    * @throws IOException if a remote or network exception occurs
468    * @deprecated Use {@link Admin#listTableNames()} instead
469    */
470   @Deprecated
471   public String[] getTableNames() throws IOException {
472     TableName[] tableNames = listTableNames();
473     String result[] = new String[tableNames.length];
474     for (int i = 0; i < tableNames.length; i++) {
475       result[i] = tableNames[i].getNameAsString();
476     }
477     return result;
478   }
479 
480   /**
481    * List all of the names of userspace tables matching the given regular expression.
482    * @param pattern The regular expression to match against
483    * @return String[] table names
484    * @throws IOException if a remote or network exception occurs
485    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
486    */
487   @Deprecated
488   public String[] getTableNames(Pattern pattern) throws IOException {
489     TableName[] tableNames = listTableNames(pattern);
490     String result[] = new String[tableNames.length];
491     for (int i = 0; i < tableNames.length; i++) {
492       result[i] = tableNames[i].getNameAsString();
493     }
494     return result;
495   }
496 
497   /**
498    * List all of the names of userspace tables matching the given regular expression.
499    * @param regex The regular expression to match against
500    * @return String[] table names
501    * @throws IOException if a remote or network exception occurs
502    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
503    */
504   @Deprecated
505   public String[] getTableNames(String regex) throws IOException {
506     return getTableNames(Pattern.compile(regex));
507   }
508 
509   @Override
510   public TableName[] listTableNames() throws IOException {
511     return listTableNames((Pattern)null, false);
512   }
513 
514   @Override
515   public TableName[] listTableNames(Pattern pattern) throws IOException {
516     return listTableNames(pattern, false);
517   }
518 
519   @Override
520   public TableName[] listTableNames(String regex) throws IOException {
521     return listTableNames(Pattern.compile(regex), false);
522   }
523 
524   @Override
525   public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
526       throws IOException {
527     return executeCallable(new MasterCallable<TableName[]>(getConnection()) {
528       @Override
529       public TableName[] call(int callTimeout) throws ServiceException {
530         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
531         controller.setCallTimeout(callTimeout);
532         GetTableNamesRequest req =
533             RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
534         return ProtobufUtil.getTableNameArray(master.getTableNames(controller, req)
535             .getTableNamesList());
536       }
537     });
538   }
539 
540   @Override
541   public TableName[] listTableNames(final String regex, final boolean includeSysTables)
542       throws IOException {
543     return listTableNames(Pattern.compile(regex), includeSysTables);
544   }
545 
546   /**
547    * Get the table descriptor for the given table.
548    * @param tableName name of the table
549    * @return the tableDescriptor
550    * @throws TableNotFoundException
551    * @throws IOException if a remote or network exception occurs
552    */
553   @Override
554   public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
555      return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
556        operationTimeout, rpcTimeout);
557   }
558 
559   static HTableDescriptor getTableDescriptor(final TableName tableName, HConnection connection,
560       RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
561       int operationTimeout, int rpcTimeout) throws IOException {
562       if (tableName == null) return null;
563       HTableDescriptor htd = executeCallable(new MasterCallable<HTableDescriptor>(connection) {
564         @Override
565         public HTableDescriptor call(int callTimeout) throws ServiceException {
566         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
567         controller.setCallTimeout(callTimeout);
568           GetTableDescriptorsResponse htds;
569           GetTableDescriptorsRequest req =
570                   RequestConverter.buildGetTableDescriptorsRequest(tableName);
571           htds = master.getTableDescriptors(controller, req);
572 
573           if (!htds.getTableSchemaList().isEmpty()) {
574             return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
575           }
576           return null;
577         }
578       }, rpcCallerFactory, operationTimeout, rpcTimeout);
579       if (htd != null) {
580         return htd;
581       }
582       throw new TableNotFoundException(tableName.getNameAsString());
583   }
584 
585   public HTableDescriptor getTableDescriptor(final byte[] tableName)
586   throws TableNotFoundException, IOException {
587     return getTableDescriptor(TableName.valueOf(tableName));
588   }
589 
590   /**
591    * Creates a new table.
592    * Synchronous operation.
593    *
594    * @param desc table descriptor for table
595    *
596    * @throws IllegalArgumentException if the table name is reserved
597    * @throws MasterNotRunningException if master is not running
598    * @throws TableExistsException if table already exists (if run by concurrent
599    * threads, the table may have been created between the test for existence
600    * and the attempt at creation).
601    * @throws IOException if a remote or network exception occurs
602    */
603   @Override
604   public void createTable(HTableDescriptor desc)
605   throws IOException {
606     createTable(desc, null);
607   }
608 
609   /**
610    * Creates a new table with the specified number of regions.  The start key
611    * specified will become the end key of the first region of the table, and
612    * the end key specified will become the start key of the last region of the
613    * table (the first region has a null start key and the last region has a
614    * null end key).
615    *
616    * BigInteger math will be used to divide the key range specified into
617    * enough segments to make the required number of total regions.
618    *
619    * Synchronous operation.
620    *
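   * <p>Illustrative sketch; the table name, column family, key range and region count
   * are made-up values and {@code admin} is assumed to exist:
   * <pre>{@code
   * HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
   * desc.addFamily(new HColumnDescriptor("cf"));
   * // create the table with 10 regions over the key range ["a", "z"]
   * admin.createTable(desc, Bytes.toBytes("a"), Bytes.toBytes("z"), 10);
   * }</pre>
   *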
621    * @param desc table descriptor for table
622    * @param startKey beginning of key range
623    * @param endKey end of key range
624    * @param numRegions the total number of regions to create
625    *
626    * @throws IllegalArgumentException if the table name is reserved
627    * @throws MasterNotRunningException if master is not running
628    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (if run by concurrent
629    * threads, the table may have been created between the test for existence
630    * and the attempt at creation).
631    * @throws IOException
632    */
633   @Override
634   public void createTable(HTableDescriptor desc, byte [] startKey,
635       byte [] endKey, int numRegions)
636   throws IOException {
637     if(numRegions < 3) {
638       throw new IllegalArgumentException("Must create at least three regions");
639     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
640       throw new IllegalArgumentException("Start key must be smaller than end key");
641     }
642     if (numRegions == 3) {
643       createTable(desc, new byte[][]{startKey, endKey});
644       return;
645     }
646     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
647     if(splitKeys == null || splitKeys.length != numRegions - 1) {
648       throw new IllegalArgumentException("Unable to split key range into enough regions");
649     }
650     createTable(desc, splitKeys);
651   }
652 
653   /**
654    * Creates a new table with an initial set of empty regions defined by the
655    * specified split keys.  The total number of regions created will be the
656    * number of split keys plus one. Synchronous operation.
657    * Note: avoid passing an empty split key.
658    *
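   * <p>Illustrative sketch with three explicit split keys, which yields four regions;
   * the names and keys are made-up values and {@code admin} is assumed to exist:
   * <pre>{@code
   * HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
   * desc.addFamily(new HColumnDescriptor("cf"));
   * byte[][] splits = new byte[][] {
   *   Bytes.toBytes("g"), Bytes.toBytes("n"), Bytes.toBytes("t")
   * };
   * admin.createTable(desc, splits);
   * }</pre>
   *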
659    * @param desc table descriptor for table
660    * @param splitKeys array of split keys for the initial regions of the table
661    *
662    * @throws IllegalArgumentException if the table name is reserved, if the split keys
663    * are repeated, or if a split key is an empty byte array.
664    * @throws MasterNotRunningException if master is not running
665    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (if run by concurrent
666    * threads, the table may have been created between the test for existence
667    * and the attempt at creation).
668    * @throws IOException
669    */
670   @Override
671   public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
672       throws IOException {
673     Future<Void> future = createTableAsyncV2(desc, splitKeys);
674     try {
675       // TODO: how long should we wait? spin forever?
676       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
677     } catch (InterruptedException e) {
678       throw new InterruptedIOException("Interrupted when waiting" +
679           " for table to be enabled; meta scan was done");
680     } catch (TimeoutException e) {
681       throw new TimeoutIOException(e);
682     } catch (ExecutionException e) {
683       if (e.getCause() instanceof IOException) {
684         throw (IOException)e.getCause();
685       } else {
686         throw new IOException(e.getCause());
687       }
688     }
689   }
690 
691   /**
692    * Creates a new table but does not block and wait for it to come online.
693    * Asynchronous operation.  To check if the table exists, use
694    * {@link #isTableAvailable} -- it is not safe to create an HTable
695    * instance for this table before it is available.
696    * Note: avoid passing an empty split key.
697    * @param desc table descriptor for table
698    *
699    * @throws IllegalArgumentException if the table name is bad, if the split keys
700    * are repeated, or if a split key is an empty byte array.
701    * @throws MasterNotRunningException if master is not running
702    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (if run by concurrent
703    * threads, the table may have been created between the test for existence
704    * and the attempt at creation).
705    * @throws IOException
706    */
707   @Override
708   public void createTableAsync(final HTableDescriptor desc, final byte [][] splitKeys)
709       throws IOException {
710     createTableAsyncV2(desc, splitKeys);
711   }
712 
713   /**
714    * Creates a new table but does not block and wait for it to come online.
715    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
716    * It may throw ExecutionException if there was an error while executing the operation
717    * or TimeoutException in case the wait timeout was not long enough to allow the
718    * operation to complete.
719    *
720    * @param desc table descriptor for table
721    * @param splitKeys split keys for the initial regions of the table (also used to
722    *    check that the table has been created with all of the split keys)
723    * @throws IllegalArgumentException if the table name is bad, if the split keys
724    *    are repeated, or if a split key is an empty byte array.
724    * @throws IOException if a remote or network exception occurs
725    * @return the result of the async creation. You can use Future.get(long, TimeUnit)
726    *    to wait on the operation to complete.
727    */
728   // TODO: This should be called Async but it will break binary compatibility
729   private Future<Void> createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys)
730       throws IOException {
731     if (desc.getTableName() == null) {
732       throw new IllegalArgumentException("TableName cannot be null");
733     }
734     if (splitKeys != null && splitKeys.length > 0) {
735       Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
736       // Verify there are no duplicate split keys
737       byte[] lastKey = null;
738       for (byte[] splitKey : splitKeys) {
739         if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
740           throw new IllegalArgumentException(
741               "Empty split key must not be passed in the split keys.");
742         }
743         if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
744           throw new IllegalArgumentException("All split keys must be unique, " +
745             "found duplicate: " + Bytes.toStringBinary(splitKey) +
746             ", " + Bytes.toStringBinary(lastKey));
747         }
748         lastKey = splitKey;
749       }
750     }
751 
752     CreateTableResponse response = executeCallable(
753         new MasterCallable<CreateTableResponse>(getConnection()) {
754       @Override
755       public CreateTableResponse call(int callTimeout) throws ServiceException {
756         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
757         controller.setCallTimeout(callTimeout);
758         controller.setPriority(desc.getTableName());
759         CreateTableRequest request = RequestConverter.buildCreateTableRequest(
760             desc, splitKeys, ng.getNonceGroup(), ng.newNonce());
761         return master.createTable(controller, request);
762       }
763     });
764     return new CreateTableFuture(this, desc, splitKeys, response);
765   }
766 
767   private static class CreateTableFuture extends ProcedureFuture<Void> {
768     private final HTableDescriptor desc;
769     private final byte[][] splitKeys;
770 
771     public CreateTableFuture(final HBaseAdmin admin, final HTableDescriptor desc,
772         final byte[][] splitKeys, final CreateTableResponse response) {
773       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
774       this.splitKeys = splitKeys;
775       this.desc = desc;
776     }
777 
778     @Override
779     protected Void waitOperationResult(final long deadlineTs)
780         throws IOException, TimeoutException {
781       waitForTableEnabled(deadlineTs);
782       waitForAllRegionsOnline(deadlineTs);
783       return null;
784     }
785 
786     @Override
787     protected Void postOperationResult(final Void result, final long deadlineTs)
788         throws IOException, TimeoutException {
789       LOG.info("Created " + desc.getTableName());
790       return result;
791     }
792 
793     private void waitForTableEnabled(final long deadlineTs)
794         throws IOException, TimeoutException {
795       waitForState(deadlineTs, new WaitForStateCallable() {
796         @Override
797         public boolean checkState(int tries) throws IOException {
798           try {
799             if (getAdmin().isTableAvailable(desc.getTableName())) {
800               return true;
801             }
802           } catch (TableNotFoundException tnfe) {
803             LOG.debug("Table "+ desc.getTableName() +" was not enabled, sleeping. tries="+  tries);
804           }
805           return false;
806         }
807 
808         @Override
809         public void throwInterruptedException() throws InterruptedIOException {
810           throw new InterruptedIOException("Interrupted when waiting for table " +
811               desc.getTableName() + " to be enabled");
812         }
813 
814         @Override
815         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
816           throw new TimeoutException("Table " + desc.getTableName() +
817             " not enabled after " + elapsedTime + "msec");
818         }
819       });
820     }
821 
822     private void waitForAllRegionsOnline(final long deadlineTs)
823         throws IOException, TimeoutException {
824       final AtomicInteger actualRegCount = new AtomicInteger(0);
825       final MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
826         @Override
827         public boolean processRow(Result rowResult) throws IOException {
828           RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
829           if (list == null) {
830             LOG.warn("No serialized HRegionInfo in " + rowResult);
831             return true;
832           }
833           HRegionLocation l = list.getRegionLocation();
834           if (l == null) {
835             return true;
836           }
837           if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
838             return false;
839           }
840           if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
841           HRegionLocation[] locations = list.getRegionLocations();
842           for (HRegionLocation location : locations) {
843             if (location == null) continue;
844             ServerName serverName = location.getServerName();
845             // Make sure that regions are assigned to server
846             if (serverName != null && serverName.getHostAndPort() != null) {
847               actualRegCount.incrementAndGet();
848             }
849           }
850           return true;
851         }
852       };
853 
854       int tries = 0;
855       IOException serverEx = null;
856       int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
857       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
858         actualRegCount.set(0);
859         MetaScanner.metaScan(getAdmin().getConnection(), visitor, desc.getTableName());
860         if (actualRegCount.get() == numRegs) {
861           // all the regions are online
862           return;
863         }
864 
865         try {
866           Thread.sleep(getPauseTime(tries++, getAdmin().pause));
867         } catch (InterruptedException e) {
868           throw new InterruptedIOException("Interrupted when opening" +
869             " regions; " + actualRegCount.get() + " of " + numRegs +
870             " regions processed so far");
871         }
872       }
873       throw new TimeoutException("Only " + actualRegCount.get() +
874               " of " + numRegs + " regions are online; retries exhausted.");
875     }
876   }
877 
878   public void deleteTable(final String tableName) throws IOException {
879     deleteTable(TableName.valueOf(tableName));
880   }
881 
882   public void deleteTable(final byte[] tableName) throws IOException {
883     deleteTable(TableName.valueOf(tableName));
884   }
885 
886   /**
887    * Deletes a table.
888    * Synchronous operation.
889    *
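   * <p>Illustrative sketch; the table name is a made-up example, {@code admin} is assumed
   * to exist, and the table is disabled first because a table must be disabled before it
   * can be deleted:
   * <pre>{@code
   * TableName tn = TableName.valueOf("example");
   * if (admin.tableExists(tn)) {
   *   admin.disableTable(tn);
   *   admin.deleteTable(tn);
   * }
   * }</pre>
   *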
890    * @param tableName name of table to delete
891    * @throws IOException if a remote or network exception occurs
892    */
893   @Override
894   public void deleteTable(final TableName tableName) throws IOException {
895     Future<Void> future = deleteTableAsyncV2(tableName);
896     try {
897       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
898     } catch (InterruptedException e) {
899       throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
900     } catch (TimeoutException e) {
901       throw new TimeoutIOException(e);
902     } catch (ExecutionException e) {
903       if (e.getCause() instanceof IOException) {
904         throw (IOException)e.getCause();
905       } else {
906         throw new IOException(e.getCause());
907       }
908     }
909   }
910 
911   /**
912    * Deletes the table but does not block or wait for it to be completely removed.
913    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
914    * It may throw ExecutionException if there was an error while executing the operation
915    * or TimeoutException in case the wait timeout was not long enough to allow the
916    * operation to complete.
917    *
918    * @param tableName name of table to delete
919    * @throws IOException if a remote or network exception occurs
920    * @return the result of the async delete. You can use Future.get(long, TimeUnit)
921    *    to wait on the operation to complete.
922    */
923   // TODO: This should be called Async but it will break binary compatibility
924   private Future<Void> deleteTableAsyncV2(final TableName tableName) throws IOException {
925     DeleteTableResponse response = executeCallable(
926         new MasterCallable<DeleteTableResponse>(getConnection()) {
927       @Override
928       public DeleteTableResponse call(int callTimeout) throws ServiceException {
929         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
930         controller.setCallTimeout(callTimeout);
931         controller.setPriority(tableName);
932         DeleteTableRequest req =
933             RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce());
934         return master.deleteTable(controller,req);
935       }
936     });
937     return new DeleteTableFuture(this, tableName, response);
938   }
939 
940   private static class DeleteTableFuture extends ProcedureFuture<Void> {
941     private final TableName tableName;
942 
943     public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
944         final DeleteTableResponse response) {
945       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
946       this.tableName = tableName;
947     }
948 
949     @Override
950     protected Void waitOperationResult(final long deadlineTs)
951         throws IOException, TimeoutException {
952       waitTableNotFound(deadlineTs);
953       return null;
954     }
955 
956     @Override
957     protected Void postOperationResult(final Void result, final long deadlineTs)
958         throws IOException, TimeoutException {
959       // Delete cached information to prevent clients from using old locations
960       getAdmin().getConnection().clearRegionCache(tableName);
961       LOG.info("Deleted " + tableName);
962       return result;
963     }
964 
965     private void waitTableNotFound(final long deadlineTs)
966         throws IOException, TimeoutException {
967       waitForState(deadlineTs, new WaitForStateCallable() {
968         @Override
969         public boolean checkState(int tries) throws IOException {
970           return !getAdmin().tableExists(tableName);
971         }
972 
973         @Override
974         public void throwInterruptedException() throws InterruptedIOException {
975           throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
976         }
977 
978         @Override
979         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
980           throw new TimeoutException("Table " + tableName + " not yet deleted after " +
981               elapsedTime + "msec");
982         }
983       });
984     }
985   }
986 
987   /**
988    * Deletes tables matching the passed in pattern and waits on completion.
989    *
990    * Warning: Use this method carefully, there is no prompting and the effect is
991    * immediate. Consider using {@link #listTables(java.lang.String)} and
992    * {@link #deleteTable(byte[])}
993    *
994    * @param regex The regular expression to match table names against
995    * @return Table descriptors for tables that couldn't be deleted
996    * @throws IOException
997    * @see #deleteTables(java.util.regex.Pattern)
998    * @see #deleteTable(java.lang.String)
999    */
1000   @Override
1001   public HTableDescriptor[] deleteTables(String regex) throws IOException {
1002     return deleteTables(Pattern.compile(regex));
1003   }
1004 
1005   /**
1006    * Delete tables matching the passed in pattern and wait on completion.
1007    *
1008    * Warning: Use this method carefully, there is no prompting and the effect is
1009    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1010    * {@link #deleteTable(byte[])}
1011    *
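   * <p>Illustrative sketch; the pattern is a made-up example and the matched tables are
   * expected to be disabled already for the deletes to succeed:
   * <pre>{@code
   * HTableDescriptor[] notDeleted = admin.deleteTables(Pattern.compile("test_.*"));
   * }</pre>
   *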
1012    * @param pattern The pattern to match table names against
1013    * @return Table descriptors for tables that couldn't be deleted
1014    * @throws IOException
1015    */
1016   @Override
1017   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
1018     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1019     for (HTableDescriptor table : listTables(pattern)) {
1020       try {
1021         deleteTable(table.getTableName());
1022       } catch (IOException ex) {
1023         LOG.info("Failed to delete table " + table.getTableName(), ex);
1024         failed.add(table);
1025       }
1026     }
1027     return failed.toArray(new HTableDescriptor[failed.size()]);
1028   }
1029 
1030   /**
1031    * Truncate a table.
1032    * Synchronous operation.
1033    *
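   * <p>Illustrative sketch; the table name is a made-up example and the table is expected
   * to be disabled before it is truncated:
   * <pre>{@code
   * admin.truncateTable(TableName.valueOf("example"), true); // keep the existing split points
   * }</pre>
   *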
1034    * @param tableName name of table to truncate
1035    * @param preserveSplits True if the splits should be preserved
1036    * @throws IOException if a remote or network exception occurs
1037    */
1038   @Override
1039   public void truncateTable(final TableName tableName, final boolean preserveSplits)
1040       throws IOException {
1041     executeCallable(new MasterCallable<Void>(getConnection()) {
1042       @Override
1043       public Void call(int callTimeout) throws ServiceException {
1044         TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
1045           tableName, preserveSplits, ng.getNonceGroup(), ng.newNonce());
1046         master.truncateTable(null, req);
1047         return null;
1048       }
1049     });
1050   }
1051 
1052   /**
1053    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
1054    * and {@link #isTableEnabled(byte[])} instead.
1055    * The table has to be in disabled state for it to be enabled.
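   * <p>Illustrative sketch (the table name is a made-up example and {@code admin} is
   * assumed to exist):
   * <pre>{@code
   * TableName tn = TableName.valueOf("example");
   * if (admin.isTableDisabled(tn)) {
   *   admin.enableTable(tn);
   * }
   * }</pre>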
1056    * @param tableName name of the table
1057    * @throws IOException if a remote or network exception occurs
1058  * Several types of IOException may be thrown:
1059  * TableNotFoundException means the table doesn't exist;
1060  * TableNotDisabledException means the table isn't in the disabled state.
1061    * @see #isTableEnabled(byte[])
1062    * @see #disableTable(byte[])
1063    * @see #enableTableAsync(byte[])
1064    */
1065   @Override
1066   public void enableTable(final TableName tableName)
1067   throws IOException {
1068     Future<Void> future = enableTableAsyncV2(tableName);
1069     try {
1070       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1071     } catch (InterruptedException e) {
1072      throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
1073     } catch (TimeoutException e) {
1074       throw new TimeoutIOException(e);
1075     } catch (ExecutionException e) {
1076       if (e.getCause() instanceof IOException) {
1077         throw (IOException)e.getCause();
1078       } else {
1079         throw new IOException(e.getCause());
1080       }
1081     }
1082   }
1083 
1084   public void enableTable(final byte[] tableName)
1085   throws IOException {
1086     enableTable(TableName.valueOf(tableName));
1087   }
1088 
1089   public void enableTable(final String tableName)
1090   throws IOException {
1091     enableTable(TableName.valueOf(tableName));
1092   }
1093 
1094   /**
1095    * Wait for the table to be enabled and available.
1096    * If enabling the table exceeds the retry period, an exception is thrown.
1097    * @param tableName name of the table
1098    * @throws IOException if a remote or network exception occurs or
1099    *    table is not enabled after the retries period.
1100    */
1101   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
1102     boolean enabled = false;
1103     long start = EnvironmentEdgeManager.currentTime();
1104     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
1105       try {
1106         enabled = isTableEnabled(tableName);
1107       } catch (TableNotFoundException tnfe) {
1108         // wait for table to be created
1109         enabled = false;
1110       }
1111       enabled = enabled && isTableAvailable(tableName);
1112       if (enabled) {
1113         break;
1114       }
1115       long sleep = getPauseTime(tries, pause);
1116       if (LOG.isDebugEnabled()) {
1117         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
1118           "enabled in " + tableName);
1119       }
1120       try {
1121         Thread.sleep(sleep);
1122       } catch (InterruptedException e) {
1123         // Do this conversion rather than let it out because do not want to
1124         // change the method signature.
1125         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
1126       }
1127     }
1128     if (!enabled) {
1129       long msec = EnvironmentEdgeManager.currentTime() - start;
1130       throw new IOException("Table '" + tableName +
1131         "' not yet enabled, after " + msec + "ms.");
1132     }
1133   }
1134 
1135   /**
1136    * Brings a table on-line (enables it).  Method returns immediately though
1137    * enable of table may take some time to complete, especially if the table
1138    * is large (All regions are opened as part of enabling process).  Check
1139    * {@link #isTableEnabled(byte[])} to learn when table is fully online.  If
1140  * the table is taking too long to come online, check the server logs.
1141    * @param tableName
1142    * @throws IOException
1143    * @since 0.90.0
1144    */
1145   @Override
1146   public void enableTableAsync(final TableName tableName)
1147   throws IOException {
1148     enableTableAsyncV2(tableName);
1149   }
1150 
1151   public void enableTableAsync(final byte[] tableName)
1152   throws IOException {
1153     enableTable(TableName.valueOf(tableName));
1154   }
1155 
1156   public void enableTableAsync(final String tableName)
1157   throws IOException {
1158     enableTableAsync(TableName.valueOf(tableName));
1159   }
1160 
1161   /**
1162  * Enables the table but does not block or wait for it to be completely enabled.
1163    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1164    * It may throw ExecutionException if there was an error while executing the operation
1165    * or TimeoutException in case the wait timeout was not long enough to allow the
1166    * operation to complete.
1167    *
1168  * @param tableName name of table to enable
1169    * @throws IOException if a remote or network exception occurs
1170    * @return the result of the async enable. You can use Future.get(long, TimeUnit)
1171    *    to wait on the operation to complete.
1172    */
1173   // TODO: This should be called Async but it will break binary compatibility
1174   private Future<Void> enableTableAsyncV2(final TableName tableName) throws IOException {
1175     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1176     EnableTableResponse response = executeCallable(
1177         new MasterCallable<EnableTableResponse>(getConnection()) {
1178       @Override
1179       public EnableTableResponse call(int callTimeout) throws ServiceException {
1180         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1181         controller.setCallTimeout(callTimeout);
1182         controller.setPriority(tableName);
1183 
1184         LOG.info("Started enable of " + tableName);
1185         EnableTableRequest req =
1186             RequestConverter.buildEnableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce());
1187         return master.enableTable(controller, req);
1188       }
1189     });
1190     return new EnableTableFuture(this, tableName, response);
1191   }
1192 
1193   private static class EnableTableFuture extends ProcedureFuture<Void> {
1194     private final TableName tableName;
1195 
1196     public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
1197         final EnableTableResponse response) {
1198       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
1199       this.tableName = tableName;
1200     }
1201 
1202     @Override
1203     protected Void waitOperationResult(final long deadlineTs)
1204         throws IOException, TimeoutException {
1205       waitTableEnabled(deadlineTs);
1206       return null;
1207     }
1208 
1209     @Override
1210     protected Void postOperationResult(final Void result, final long deadlineTs)
1211         throws IOException, TimeoutException {
1212       LOG.info("Enabled " + tableName);
1213       return result;
1214     }
1215 
1216     private void waitTableEnabled(final long deadlineTs)
1217         throws IOException, TimeoutException {
1218       waitForState(deadlineTs, new WaitForStateCallable() {
1219         @Override
1220         public boolean checkState(int tries) throws IOException {
1221           boolean enabled;
1222           try {
1223             enabled = getAdmin().isTableEnabled(tableName);
1224           } catch (TableNotFoundException tnfe) {
1225             return false;
1226           }
1227           return enabled && getAdmin().isTableAvailable(tableName);
1228         }
1229 
1230         @Override
1231         public void throwInterruptedException() throws InterruptedIOException {
1232           throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
1233         }
1234 
1235         @Override
1236         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1237           throw new TimeoutException("Table " + tableName + " not yet enabled after " +
1238               elapsedTime + "msec");
1239         }
1240       });
1241     }
1242   }
1243 
1244   /**
1245    * Enable tables matching the passed in pattern and wait on completion.
1246    *
1247    * Warning: Use this method carefully, there is no prompting and the effect is
1248    * immediate. Consider using {@link #listTables(java.lang.String)} and
1249    * {@link #enableTable(byte[])}
1250    *
1251    * @param regex The regular expression to match table names against
1252    * @throws IOException
1253    * @see #enableTables(java.util.regex.Pattern)
1254    * @see #enableTable(java.lang.String)
1255    */
1256   @Override
1257   public HTableDescriptor[] enableTables(String regex) throws IOException {
1258     return enableTables(Pattern.compile(regex));
1259   }
1260 
1261   /**
1262    * Enable tables matching the passed in pattern and wait on completion.
1263    *
1264    * Warning: Use this method carefully, there is no prompting and the effect is
1265    * immediate. Consider using {@link #listTables(java.util.regex.Pattern)} and
1266    * {@link #enableTable(byte[])}
1267    *
1268    * @param pattern The pattern to match table names against
1269    * @throws IOException
1270    */
1271   @Override
1272   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
1273     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1274     for (HTableDescriptor table : listTables(pattern)) {
1275       if (isTableDisabled(table.getTableName())) {
1276         try {
1277           enableTable(table.getTableName());
1278         } catch (IOException ex) {
1279           LOG.info("Failed to enable table " + table.getTableName(), ex);
1280           failed.add(table);
1281         }
1282       }
1283     }
1284     return failed.toArray(new HTableDescriptor[failed.size()]);
1285   }
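
  // Editorial usage sketch (not part of the original class): bulk-enable every table
  // whose name matches a regex and report the ones that could not be enabled. The
  // regex "test_.*" is a placeholder.
  private static void exampleEnableTablesUsage(Admin admin) throws IOException {
    HTableDescriptor[] failed = admin.enableTables("test_.*");
    for (HTableDescriptor htd : failed) {
      System.err.println("Could not enable " + htd.getTableName());
    }
  }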
1286 
1287   /**
1288    * Starts the disable of a table.  If it is being served, the master
1289    * will tell the servers to stop serving it.  This method returns immediately.
1290    * The disable of a table can take some time if the table is large (all
1291    * regions are closed as part of table disable operation).
1292    * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
1293    * If the table is taking too long to go offline, check server logs.
1294    * @param tableName name of table
1295    * @throws IOException if a remote or network exception occurs
1296    * @see #isTableDisabled(byte[])
1297    * @see #isTableEnabled(byte[])
1298    * @since 0.90.0
1299    */
1300   @Override
1301   public void disableTableAsync(final TableName tableName) throws IOException {
1302     disableTableAsyncV2(tableName);
1303   }
1304 
1305   public void disableTableAsync(final byte[] tableName) throws IOException {
1306     disableTableAsync(TableName.valueOf(tableName));
1307   }
1308 
1309   public void disableTableAsync(final String tableName) throws IOException {
1310     disableTableAsync(TableName.valueOf(tableName));
1311   }
1312 
1313   /**
1314    * Disable table and wait on completion.  May timeout eventually.  Use
1315    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
1316    * instead.
1317    * The table has to be in enabled state for it to be disabled.
1318    * @param tableName name of the table to disable
1319    * @throws IOException
1320    * There could be a couple of types of IOException:
1321    * TableNotFoundException means the table doesn't exist.
1322    * TableNotEnabledException means the table isn't in enabled state.
1323    */
1324   @Override
1325   public void disableTable(final TableName tableName)
1326   throws IOException {
1327     Future<Void> future = disableTableAsyncV2(tableName);
1328     try {
1329       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1330     } catch (InterruptedException e) {
1331       throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1332     } catch (TimeoutException e) {
1333       throw new TimeoutIOException(e);
1334     } catch (ExecutionException e) {
1335       if (e.getCause() instanceof IOException) {
1336         throw (IOException)e.getCause();
1337       } else {
1338         throw new IOException(e.getCause());
1339       }
1340     }
1341   }
1342 
1343   public void disableTable(final byte[] tableName)
1344   throws IOException {
1345     disableTable(TableName.valueOf(tableName));
1346   }
1347 
1348   public void disableTable(final String tableName)
1349   throws IOException {
1350     disableTable(TableName.valueOf(tableName));
1351   }
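
  // Editorial usage sketch (not part of the original class): the common
  // disable-then-delete sequence. disableTable() blocks until all regions are
  // offline, after which the table can be deleted. "example_table" is a placeholder.
  private static void exampleDisableAndDelete(Admin admin) throws IOException {
    TableName tn = TableName.valueOf("example_table");
    if (admin.tableExists(tn)) {
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);   // synchronous; may throw TableNotEnabledException
      }
      admin.deleteTable(tn);
    }
  }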
1352 
1353   /**
1354    * Disable the table but do not block and wait for it to be completely disabled.
1355    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1356    * It may throw ExecutionException if there was an error while executing the operation
1357    * or TimeoutException in case the wait timeout was not long enough to allow the
1358    * operation to complete.
1359    *
1360    * @param tableName name of table to disable
1361    * @throws IOException if a remote or network exception occurs
1362    * @return the result of the async disable. You can use Future.get(long, TimeUnit)
1363    *    to wait on the operation to complete.
1364    */
1365   // TODO: This should be called Async but it will break binary compatibility
1366   private Future<Void> disableTableAsyncV2(final TableName tableName) throws IOException {
1367     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1368     DisableTableResponse response = executeCallable(
1369         new MasterCallable<DisableTableResponse>(getConnection()) {
1370       @Override
1371       public DisableTableResponse call(int callTimeout) throws ServiceException {
1372         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1373         controller.setCallTimeout(callTimeout);
1374         controller.setPriority(tableName);
1375 
1376         LOG.info("Started disable of " + tableName);
1377         DisableTableRequest req =
1378             RequestConverter.buildDisableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce());
1379         return master.disableTable(controller, req);
1380       }
1381     });
1382     return new DisableTableFuture(this, tableName, response);
1383   }
1384 
1385   private static class DisableTableFuture extends ProcedureFuture<Void> {
1386     private final TableName tableName;
1387 
1388     public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
1389         final DisableTableResponse response) {
1390       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
1391       this.tableName = tableName;
1392     }
1393 
1394     @Override
1395     protected Void waitOperationResult(final long deadlineTs)
1396         throws IOException, TimeoutException {
1397       waitTableDisabled(deadlineTs);
1398       return null;
1399     }
1400 
1401     @Override
1402     protected Void postOperationResult(final Void result, final long deadlineTs)
1403         throws IOException, TimeoutException {
1404       LOG.info("Disabled " + tableName);
1405       return result;
1406     }
1407 
1408     private void waitTableDisabled(final long deadlineTs)
1409         throws IOException, TimeoutException {
1410       waitForState(deadlineTs, new WaitForStateCallable() {
1411         @Override
1412         public boolean checkState(int tries) throws IOException {
1413           return getAdmin().isTableDisabled(tableName);
1414         }
1415 
1416         @Override
1417         public void throwInterruptedException() throws InterruptedIOException {
1418           throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1419         }
1420 
1421         @Override
1422         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1423           throw new TimeoutException("Table " + tableName + " not yet disabled after " +
1424               elapsedTime + "msec");
1425         }
1426       });
1427     }
1428   }
1429 
1430   /**
1431    * Disable tables matching the passed in pattern and wait on completion.
1432    *
1433    * Warning: Use this method carefully, there is no prompting and the effect is
1434    * immediate. Consider using {@link #listTables(java.lang.String)} and
1435    * {@link #disableTable(byte[])}
1436    *
1437    * @param regex The regular expression to match table names against
1438    * @return Table descriptors for tables that couldn't be disabled
1439    * @throws IOException
1440    * @see #disableTables(java.util.regex.Pattern)
1441    * @see #disableTable(java.lang.String)
1442    */
1443   @Override
1444   public HTableDescriptor[] disableTables(String regex) throws IOException {
1445     return disableTables(Pattern.compile(regex));
1446   }
1447 
1448   /**
1449    * Disable tables matching the passed in pattern and wait on completion.
1450    *
1451    * Warning: Use this method carefully, there is no prompting and the effect is
1452    * immediate. Consider using {@link #listTables(java.util.regex.Pattern)} and
1453    * {@link #disableTable(byte[])}
1454    *
1455    * @param pattern The pattern to match table names against
1456    * @return Table descriptors for tables that couldn't be disabled
1457    * @throws IOException
1458    */
1459   @Override
1460   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
1461     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1462     for (HTableDescriptor table : listTables(pattern)) {
1463       if (isTableEnabled(table.getTableName())) {
1464         try {
1465           disableTable(table.getTableName());
1466         } catch (IOException ex) {
1467           LOG.info("Failed to disable table " + table.getTableName(), ex);
1468           failed.add(table);
1469         }
1470       }
1471     }
1472     return failed.toArray(new HTableDescriptor[failed.size()]);
1473   }
1474 
1475   /*
1476    * Checks whether table exists. If not, throws TableNotFoundException
1477    * @param tableName
1478    */
1479   private void checkTableExistence(TableName tableName) throws IOException {
1480     if (!tableExists(tableName)) {
1481       throw new TableNotFoundException(tableName);
1482     }
1483   }
1484 
1485   /**
1486    * @param tableName name of table to check
1487    * @return true if table is on-line
1488    * @throws IOException if a remote or network exception occurs
1489    */
1490   @Override
1491   public boolean isTableEnabled(TableName tableName) throws IOException {
1492     checkTableExistence(tableName);
1493     return connection.isTableEnabled(tableName);
1494   }
1495 
1496   public boolean isTableEnabled(byte[] tableName) throws IOException {
1497     return isTableEnabled(TableName.valueOf(tableName));
1498   }
1499 
1500   public boolean isTableEnabled(String tableName) throws IOException {
1501     return isTableEnabled(TableName.valueOf(tableName));
1502   }
1503 
1504 
1505 
1506   /**
1507    * @param tableName name of table to check
1508    * @return true if table is off-line
1509    * @throws IOException if a remote or network exception occurs
1510    */
1511   @Override
1512   public boolean isTableDisabled(TableName tableName) throws IOException {
1513     checkTableExistence(tableName);
1514     return connection.isTableDisabled(tableName);
1515   }
1516 
1517   public boolean isTableDisabled(byte[] tableName) throws IOException {
1518     return isTableDisabled(TableName.valueOf(tableName));
1519   }
1520 
1521   public boolean isTableDisabled(String tableName) throws IOException {
1522     return isTableDisabled(TableName.valueOf(tableName));
1523   }
1524 
1525   /**
1526    * @param tableName name of table to check
1527    * @return true if all regions of the table are available
1528    * @throws IOException if a remote or network exception occurs
1529    */
1530   @Override
1531   public boolean isTableAvailable(TableName tableName) throws IOException {
1532     return connection.isTableAvailable(tableName);
1533   }
1534 
1535   public boolean isTableAvailable(byte[] tableName) throws IOException {
1536     return isTableAvailable(TableName.valueOf(tableName));
1537   }
1538 
1539   public boolean isTableAvailable(String tableName) throws IOException {
1540     return isTableAvailable(TableName.valueOf(tableName));
1541   }
1542 
1543   /**
1544    * Use this API to check if the table has been created with the specified number of
1545    * split keys that were used while creating the given table.
1546    * Note: if this API is used after a table's regions get split, it may return
1547    * false.
1548    * @param tableName
1549    *          name of table to check
1550    * @param splitKeys
1551    *          keys to check if the table has been created with all split keys
1552    * @throws IOException
1553    *           if a remote or network exception occurs
1554    */
1555   @Override
1556   public boolean isTableAvailable(TableName tableName,
1557                                   byte[][] splitKeys) throws IOException {
1558     return connection.isTableAvailable(tableName, splitKeys);
1559   }
1560 
1561   public boolean isTableAvailable(byte[] tableName,
1562                                   byte[][] splitKeys) throws IOException {
1563     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1564   }
1565 
1566   public boolean isTableAvailable(String tableName,
1567                                   byte[][] splitKeys) throws IOException {
1568     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1569   }
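
  // Editorial usage sketch (not part of the original class): create a pre-split table
  // and then verify that it came up with all of the requested split keys, as described
  // in the Javadoc above. Table name, family name, and split keys are placeholders.
  private static void exampleCheckSplitKeys(Admin admin) throws IOException {
    TableName tn = TableName.valueOf("example_presplit");
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("m"), Bytes.toBytes("t") };
    HTableDescriptor htd = new HTableDescriptor(tn);
    htd.addFamily(new HColumnDescriptor("cf"));
    admin.createTable(htd, splitKeys);
    if (!admin.isTableAvailable(tn, splitKeys)) {
      System.err.println("Table " + tn + " is not yet available with all split keys");
    }
  }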
1570 
1571   /**
1572    * Get the status of an alter command - indicates how many regions have received
1573    * the updated schema. Asynchronous operation.
1574    *
1575    * @param tableName TableName instance
1576    * @return Pair indicating the number of regions updated. Pair.getFirst() is the
1577    *         number of regions that are yet to be updated; Pair.getSecond() is the total
1578    *         number of regions of the table
1579    * @throws IOException
1580    *           if a remote or network exception occurs
1581    */
1582   @Override
1583   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1584   throws IOException {
1585     return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
1586       @Override
1587       public Pair<Integer, Integer> call(int callTimeout) throws ServiceException {
1588         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1589         controller.setCallTimeout(callTimeout);
1590         controller.setPriority(tableName);
1591 
1592         GetSchemaAlterStatusRequest req = RequestConverter
1593             .buildGetSchemaAlterStatusRequest(tableName);
1594         GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(controller, req);
1595         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1596             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1597         return pair;
1598       }
1599     });
1600   }
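
  // Editorial usage sketch (not part of the original class): poll getAlterStatus()
  // after a schema change until every region has picked up the new schema.
  // Pair.getFirst() is the count of regions still to be updated.
  private static void exampleWaitForAlter(Admin admin, TableName tn)
      throws IOException, InterruptedException {
    Pair<Integer, Integer> status = admin.getAlterStatus(tn);
    while (status.getFirst() > 0) {
      Thread.sleep(1000);
      status = admin.getAlterStatus(tn);
    }
  }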
1601 
1602   /**
1603    * Get the status of an alter command - indicates how many regions have received
1604    * the updated schema. Asynchronous operation.
1605    *
1606    * @param tableName
1607    *          name of the table to get the status of
1608    * @return Pair indicating the number of regions updated. Pair.getFirst() is the
1609    *         number of regions that are yet to be updated; Pair.getSecond() is the total
1610    *         number of regions of the table
1611    * @throws IOException
1612    *           if a remote or network exception occurs
1613    */
1614   @Override
1615   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1616    throws IOException {
1617     return getAlterStatus(TableName.valueOf(tableName));
1618   }
1619 
1620   /**
1621    * Add a column to an existing table.
1622    * Asynchronous operation.
1623    *
1624    * @param tableName name of the table to add column to
1625    * @param column column descriptor of column to be added
1626    * @throws IOException if a remote or network exception occurs
1627    */
1628   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1629   throws IOException {
1630     addColumn(TableName.valueOf(tableName), column);
1631   }
1632 
1633   /**
1634    * Add a column to an existing table.
1635    * Asynchronous operation.
1636    *
1637    * @param tableName name of the table to add column to
1638    * @param column column descriptor of column to be added
1639    * @throws IOException if a remote or network exception occurs
1640    */
1641   public void addColumn(final String tableName, HColumnDescriptor column)
1642   throws IOException {
1643     addColumn(TableName.valueOf(tableName), column);
1644   }
1645 
1646   /**
1647    * Add a column to an existing table.
1648    * Asynchronous operation.
1649    *
1650    * @param tableName name of the table to add column to
1651    * @param column column descriptor of column to be added
1652    * @throws IOException if a remote or network exception occurs
1653    */
1654   @Override
1655   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1656   throws IOException {
1657     executeCallable(new MasterCallable<Void>(getConnection()) {
1658       @Override
1659       public Void call(int callTimeout) throws ServiceException {
1660         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1661         controller.setCallTimeout(callTimeout);
1662         controller.setPriority(tableName);
1663         AddColumnRequest req = RequestConverter.buildAddColumnRequest(
1664           tableName, column, ng.getNonceGroup(), ng.newNonce());
1665         master.addColumn(controller, req);
1666         return null;
1667       }
1668     });
1669   }
1670 
1671   /**
1672    * Delete a column from a table.
1673    * Asynchronous operation.
1674    *
1675    * @param tableName name of table
1676    * @param columnName name of column to be deleted
1677    * @throws IOException if a remote or network exception occurs
1678    */
1679   public void deleteColumn(final byte[] tableName, final String columnName)
1680   throws IOException {
1681     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1682   }
1683 
1684   /**
1685    * Delete a column from a table.
1686    * Asynchronous operation.
1687    *
1688    * @param tableName name of table
1689    * @param columnName name of column to be deleted
1690    * @throws IOException if a remote or network exception occurs
1691    */
1692   public void deleteColumn(final String tableName, final String columnName)
1693   throws IOException {
1694     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1695   }
1696 
1697   /**
1698    * Delete a column from a table.
1699    * Asynchronous operation.
1700    *
1701    * @param tableName name of table
1702    * @param columnName name of column to be deleted
1703    * @throws IOException if a remote or network exception occurs
1704    */
1705   @Override
1706   public void deleteColumn(final TableName tableName, final byte [] columnName)
1707   throws IOException {
1708     executeCallable(new MasterCallable<Void>(getConnection()) {
1709       @Override
1710       public Void call(int callTimeout) throws ServiceException {
1711         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1712         controller.setCallTimeout(callTimeout);
1713         controller.setPriority(tableName);
1714         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(
1715                 tableName, columnName, ng.getNonceGroup(), ng.newNonce());
1716         master.deleteColumn(controller, req);
1717         return null;
1718       }
1719     });
1720   }
1721 
1722   /**
1723    * Modify an existing column family on a table.
1724    * Asynchronous operation.
1725    *
1726    * @param tableName name of table
1727    * @param descriptor new column descriptor to use
1728    * @throws IOException if a remote or network exception occurs
1729    */
1730   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1731   throws IOException {
1732     modifyColumn(TableName.valueOf(tableName), descriptor);
1733   }
1734 
1735   /**
1736    * Modify an existing column family on a table.
1737    * Asynchronous operation.
1738    *
1739    * @param tableName name of table
1740    * @param descriptor new column descriptor to use
1741    * @throws IOException if a remote or network exception occurs
1742    */
1743   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1744   throws IOException {
1745     modifyColumn(TableName.valueOf(tableName), descriptor);
1746   }
1747 
1748   /**
1749    * Modify an existing column family on a table.
1750    * Asynchronous operation.
1751    *
1752    * @param tableName name of table
1753    * @param descriptor new column descriptor to use
1754    * @throws IOException if a remote or network exception occurs
1755    */
1756   @Override
1757   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1758   throws IOException {
1759     executeCallable(new MasterCallable<Void>(getConnection()) {
1760       @Override
1761       public Void call(int callTimeout) throws ServiceException {
1762         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1763         controller.setCallTimeout(callTimeout);
1764         controller.setPriority(tableName);
1765         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(
1766                 tableName, descriptor, ng.getNonceGroup(), ng.newNonce());
1767         master.modifyColumn(controller, req);
1768         return null;
1769       }
1770     });
1771   }
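
  // Editorial usage sketch (not part of the original class): bump the number of
  // retained versions on an existing column family. The call is asynchronous, so a
  // caller that needs to know when it took effect can poll getAlterStatus() as shown
  // earlier. The family name "cf" is a placeholder.
  private static void exampleModifyColumn(Admin admin, TableName tn) throws IOException {
    HColumnDescriptor cf = new HColumnDescriptor("cf");
    cf.setMaxVersions(5);
    admin.modifyColumn(tn, cf);
  }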
1772 
1773   /**
1774    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1775    * master will not be informed of the close.
1776    * @param regionname region name to close
1777    * @param serverName If supplied, we'll use this location rather than
1778    * the one currently in <code>hbase:meta</code>
1779    * @throws IOException if a remote or network exception occurs
1780    */
1781   @Override
1782   public void closeRegion(final String regionname, final String serverName)
1783   throws IOException {
1784     closeRegion(Bytes.toBytes(regionname), serverName);
1785   }
1786 
1787   /**
1788    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1789    * master will not be informed of the close.
1790    * @param regionname region name to close
1791    * @param serverName The servername of the regionserver.  If passed null we
1792    * will use servername found in the hbase:meta table. A server name
1793    * is made of host, port and startcode.  Here is an example:
1794    * <code> host187.example.com,60020,1289493121758</code>
1795    * @throws IOException if a remote or network exception occurs
1796    */
1797   @Override
1798   public void closeRegion(final byte [] regionname, final String serverName)
1799       throws IOException {
1800     if (serverName != null) {
1801       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1802       if (pair == null || pair.getFirst() == null) {
1803         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1804       } else {
1805         closeRegion(ServerName.valueOf(serverName), pair.getFirst());
1806       }
1807     } else {
1808       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1809       if (pair == null) {
1810         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1811       } else if (pair.getSecond() == null) {
1812         throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1813       } else {
1814         closeRegion(pair.getSecond(), pair.getFirst());
1815       }
1816     }
1817   }
1818 
1819   /**
1820    * For expert-admins. Runs close on the regionserver. Closes a region based on
1821    * the encoded region name. The region server name is mandatory; the region is
1822    * closed only if it is among the online regions of the specified regionserver.
1823    * The master will not be informed of the close. Note that the regionname is the
1824    * encoded regionname.
1825    *
1826    * @param encodedRegionName
1827    *          The encoded region name; i.e. the hash that makes up the region
1828    *          name suffix: e.g. if regionname is
1829    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1830    *          , then the encoded region name is:
1831    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1832    * @param serverName
1833    *          The servername of the regionserver. A server name is made of host,
1834    *          port and startcode. This is mandatory. Here is an example:
1835    *          <code> host187.example.com,60020,1289493121758</code>
1836    * @return true if the region was closed, false if not.
1837    * @throws IOException
1838    *           if a remote or network exception occurs
1839    */
1840   @Override
1841   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1842       final String serverName) throws IOException {
1843     if (serverName == null || serverName.trim().isEmpty()) {
1844       throw new IllegalArgumentException(
1845           "The servername cannot be null or empty.");
1846     }
1847     ServerName sn = ServerName.valueOf(serverName);
1848     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1849     // Close the region without updating zk state.
1850     CloseRegionRequest request =
1851       RequestConverter.buildCloseRegionRequest(sn, encodedRegionName, false);
1852     try {
1853       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1854 
1855       // TODO: this does not do retries, it should. Set priority and timeout in controller
1856       CloseRegionResponse response = admin.closeRegion(controller, request);
1857       boolean isRegionClosed = response.getClosed();
1858       if (!isRegionClosed) {
1859         LOG.error("Not able to close the region " + encodedRegionName + ".");
1860       }
1861       return isRegionClosed;
1862     } catch (ServiceException se) {
1863       throw ProtobufUtil.getRemoteException(se);
1864     }
1865   }
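
  // Editorial usage sketch (not part of the original class): close a region by its
  // encoded name on a specific regionserver, using the "host,port,startcode" server
  // name format described in the Javadoc above. Both values are placeholders taken
  // from the Javadoc example.
  private static void exampleCloseByEncodedName(Admin admin) throws IOException {
    boolean closed = admin.closeRegionWithEncodedRegionName(
        "527db22f95c8a9e0116f0cc13c680396", "host187.example.com,60020,1289493121758");
    if (!closed) {
      System.err.println("Region was not closed");
    }
  }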
1866 
1867   /**
1868    * Close a region.  For expert-admins.  Runs close on the regionserver.  The
1869    * master will not be informed of the close.
1870    * @param sn the regionserver holding the region
1871    * @param hri region to close
1872    * @throws IOException if a remote or network exception occurs
1873    */
1874   @Override
1875   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1876   throws IOException {
1877     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1878     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1879 
1880     // Close the region without updating zk state.
1881     ProtobufUtil.closeRegion(controller, admin, sn, hri.getRegionName(), false);
1882   }
1883 
1884   /**
1885    * Get all the online regions on a region server.
1886    */
1887   @Override
1888   public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1889     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1890     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1891     return ProtobufUtil.getOnlineRegions(controller, admin);
1892   }
1893 
1894   /**
1895    * {@inheritDoc}
1896    */
1897   @Override
1898   public void flush(final TableName tableName) throws IOException {
1899     checkTableExists(tableName);
1900     if (isTableDisabled(tableName)) {
1901       LOG.info("Table is disabled: " + tableName.getNameAsString());
1902       return;
1903     }
1904     execProcedure("flush-table-proc", tableName.getNameAsString(),
1905       new HashMap<String, String>());
1906   }
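
  // Editorial usage sketch (not part of the original class): flush all regions of a
  // table. As shown above, the flush is delegated to the "flush-table-proc"
  // procedure on the master. The table name is a placeholder.
  private static void exampleFlushTable(Admin admin) throws IOException {
    admin.flush(TableName.valueOf("example_table"));
  }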
1907 
1908   /**
1909    * {@inheritDoc}
1910    */
1911   @Override
1912   public void flushRegion(final byte[] regionName) throws IOException {
1913     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1914     if (regionServerPair == null) {
1915       throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1916     }
1917     if (regionServerPair.getSecond() == null) {
1918       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1919     }
1920     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1921   }
1922 
1923   /**
1924    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or
1925    * {@link #flushRegion(byte[])} instead.
1926    */
1927   @Deprecated
1928   public void flush(final String tableNameOrRegionName)
1929   throws IOException, InterruptedException {
1930     flush(Bytes.toBytes(tableNameOrRegionName));
1931   }
1932 
1933   /**
1934    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or
1935    * {@link #flushRegion(byte[])} instead.
1936    */
1937   @Deprecated
1938   public void flush(final byte[] tableNameOrRegionName)
1939   throws IOException, InterruptedException {
1940     try {
1941       flushRegion(tableNameOrRegionName);
1942     } catch (IllegalArgumentException e) {
1943       // Unknown region.  Try table.
1944       flush(TableName.valueOf(tableNameOrRegionName));
1945     }
1946   }
1947 
1948   private void flush(final ServerName sn, final HRegionInfo hri)
1949   throws IOException {
1950     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1951     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1952     FlushRegionRequest request =
1953       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1954     try {
1955       admin.flushRegion(controller, request);
1956     } catch (ServiceException se) {
1957       throw ProtobufUtil.getRemoteException(se);
1958     }
1959   }
1960 
1961   /**
1962    * {@inheritDoc}
1963    */
1964   @Override
1965   public void compact(final TableName tableName)
1966     throws IOException {
1967     compact(tableName, null, false, CompactType.NORMAL);
1968   }
1969 
1970   /**
1971    * {@inheritDoc}
1972    */
1973   @Override
1974   public void compactRegion(final byte[] regionName)
1975     throws IOException {
1976     compactRegion(regionName, null, false);
1977   }
1978 
1979   /**
1980    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
1981    * {@link #compactRegion(byte[])} instead.
1982    */
1983   @Deprecated
1984   public void compact(final String tableNameOrRegionName)
1985   throws IOException {
1986     compact(Bytes.toBytes(tableNameOrRegionName));
1987   }
1988 
1989   /**
1990    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
1991    * {@link #compactRegion(byte[])} instead.
1992    */
1993   @Deprecated
1994   public void compact(final byte[] tableNameOrRegionName)
1995   throws IOException {
1996     try {
1997       compactRegion(tableNameOrRegionName, null, false);
1998     } catch (IllegalArgumentException e) {
1999       compact(TableName.valueOf(tableNameOrRegionName), null, false, CompactType.NORMAL);
2000     }
2001   }
2002 
2003   /**
2004    * {@inheritDoc}
2005    */
2006   @Override
2007   public void compact(final TableName tableName, final byte[] columnFamily)
2008     throws IOException {
2009     compact(tableName, columnFamily, false, CompactType.NORMAL);
2010   }
2011 
2012   /**
2013    * {@inheritDoc}
2014    */
2015   @Override
2016   public void compactRegion(final byte[] regionName, final byte[] columnFamily)
2017     throws IOException {
2018     compactRegion(regionName, columnFamily, false);
2019   }
2020 
2021   /**
2022    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
2023    * {@link #compactRegion(byte[], byte[])} instead.
2024    */
2025   @Deprecated
2026   public void compact(String tableOrRegionName, String columnFamily)
2027     throws IOException {
2028     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
2029   }
2030 
2031   /**
2032    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or
2033    * {@link #compactRegion(byte[], byte[])} instead.
2034    */
2035   @Deprecated
2036   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2037   throws IOException {
2038     try {
2039       compactRegion(tableNameOrRegionName, columnFamily, false);
2040     } catch (IllegalArgumentException e) {
2041       // Bad region, try table
2042       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false, CompactType.NORMAL);
2043     }
2044   }
2045 
2046   /**
2047    * {@inheritDoc}
2048    */
2049   @Override
2050   public void compactRegionServer(final ServerName sn, boolean major)
2051   throws IOException, InterruptedException {
2052     for (HRegionInfo region : getOnlineRegions(sn)) {
2053       compact(sn, region, major, null);
2054     }
2055   }
2056 
2057   /**
2058    * {@inheritDoc}
2059    */
2060   @Override
2061   public void majorCompact(final TableName tableName)
2062   throws IOException {
2063     compact(tableName, null, true, CompactType.NORMAL);
2064   }
2065 
2066   /**
2067    * {@inheritDoc}
2068    */
2069   @Override
2070   public void majorCompactRegion(final byte[] regionName)
2071   throws IOException {
2072     compactRegion(regionName, null, true);
2073   }
2074 
2075   /**
2076    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2077    * #majorCompactRegion(byte[])} instead.
2078    */
2079   @Deprecated
2080   public void majorCompact(final String tableNameOrRegionName)
2081   throws IOException {
2082     majorCompact(Bytes.toBytes(tableNameOrRegionName));
2083   }
2084 
2085   /**
2086    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2087    * #majorCompactRegion(byte[])} instead.
2088    */
2089   @Deprecated
2090   public void majorCompact(final byte[] tableNameOrRegionName)
2091   throws IOException {
2092     try {
2093       compactRegion(tableNameOrRegionName, null, true);
2094     } catch (IllegalArgumentException e) {
2095       // Invalid region, try table
2096       compact(TableName.valueOf(tableNameOrRegionName), null, true, CompactType.NORMAL);
2097     }
2098   }
2099 
2100   /**
2101    * {@inheritDoc}
2102    */
2103   @Override
2104   public void majorCompact(final TableName tableName, final byte[] columnFamily)
2105   throws IOException {
2106     compact(tableName, columnFamily, true, CompactType.NORMAL);
2107   }
2108 
2109   /**
2110    * {@inheritDoc}
2111    */
2112   @Override
2113   public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
2114   throws IOException {
2115     compactRegion(regionName, columnFamily, true);
2116   }
2117 
2118   /**
2119    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2120    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2121    */
2122   @Deprecated
2123   public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
2124   throws IOException {
2125     majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
2126   }
2127 
2128   /**
2129    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2130    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2131    */
2132   @Deprecated
2133   public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2134   throws IOException {
2135     try {
2136       compactRegion(tableNameOrRegionName, columnFamily, true);
2137     } catch (IllegalArgumentException e) {
2138       // Invalid region, try table
2139       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true, CompactType.NORMAL);
2140     }
2141   }
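
  // Editorial usage sketch (not part of the original class): request a minor
  // compaction of a whole table and a major compaction of one column family. Both
  // calls are asynchronous; they only queue the compactions on the regionservers.
  // Table and family names are placeholders.
  private static void exampleCompactions(Admin admin) throws IOException {
    TableName tn = TableName.valueOf("example_table");
    admin.compact(tn);                              // minor-compact every region
    admin.majorCompact(tn, Bytes.toBytes("cf"));    // major-compact one family
  }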
2142 
2143   /**
2144    * Compact a table.
2145    * Asynchronous operation.
2146    *
2147    * @param tableName table to compact
2148    * @param columnFamily column family within a table or region
2149    * @param major True if we are to do a major compaction.
2150    * @throws IOException if a remote or network exception occurs
2151    * @throws InterruptedException
2152    */
2153   private void compact(final TableName tableName, final byte[] columnFamily, final boolean major,
2154       CompactType compactType) throws IOException {
2155     switch (compactType) {
2156     case MOB:
2157       ServerName master = getMasterAddress();
2158       compact(master, getMobRegionInfo(tableName), major, columnFamily);
2159       break;
2160     case NORMAL:
2161     default:
2162       ZooKeeperWatcher zookeeper = null;
2163       try {
2164         checkTableExists(tableName);
2165         zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2166           new ThrowableAbortable());
2167         List<Pair<HRegionInfo, ServerName>> pairs =
2168             MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2169         for (Pair<HRegionInfo, ServerName> pair: pairs) {
2170           if (pair.getFirst().isOffline()) continue;
2171           if (pair.getSecond() == null) continue;
2172           try {
2173             compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
2174           } catch (NotServingRegionException e) {
2175             if (LOG.isDebugEnabled()) {
2176               LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
2177                   pair.getFirst() + ": " +
2178                   StringUtils.stringifyException(e));
2179             }
2180           }
2181         }
2182       } finally {
2183         if (zookeeper != null) {
2184           zookeeper.close();
2185         }
2186       }
2187       break;
2188     }
2189   }
2190 
2191   /**
2192    * Compact an individual region.
2193    * Asynchronous operation.
2194    *
2195    * @param regionName region to compact
2196    * @param columnFamily column family within a table or region
2197    * @param major True if we are to do a major compaction.
2198    * @throws IOException if a remote or network exception occurs
2199    * @throws InterruptedException
2200    */
2201   private void compactRegion(final byte[] regionName, final byte[] columnFamily, final boolean major)
2202   throws IOException {
2203     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2204     if (regionServerPair == null) {
2205       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2206     }
2207     if (regionServerPair.getSecond() == null) {
2208       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2209     }
2210     compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
2211   }
2212 
2213   private void compact(final ServerName sn, final HRegionInfo hri,
2214       final boolean major, final byte [] family)
2215   throws IOException {
2216     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2217     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2218     CompactRegionRequest request =
2219       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
2220     try {
2221       // TODO: this does not do retries, it should. Set priority and timeout in controller
2222       admin.compactRegion(controller, request);
2223     } catch (ServiceException se) {
2224       throw ProtobufUtil.getRemoteException(se);
2225     }
2226   }
2227 
2228   /**
2229    * Move the region <code>r</code> to <code>dest</code>.
2230    * @param encodedRegionName The encoded region name; i.e. the hash that makes
2231    * up the region name suffix: e.g. if regionname is
2232    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
2233    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
2234    * @param destServerName The servername of the destination regionserver.  If
2235    * passed the empty byte array we'll assign to a random server.  A server name
2236    * is made of host, port and startcode.  Here is an example:
2237    * <code> host187.example.com,60020,1289493121758</code>
2238    * @throws UnknownRegionException Thrown if we can't find a region named
2239    * <code>encodedRegionName</code>
2240    */
2241   @Override
2242   public void move(final byte [] encodedRegionName, final byte [] destServerName)
2243       throws IOException {
2244 
2245     executeCallable(new MasterCallable<Void>(getConnection()) {
2246       @Override
2247       public Void call(int callTimeout) throws ServiceException {
2248         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2249         controller.setCallTimeout(callTimeout);
2250         // Hard to know the table name, at least check if meta
2251         if (isMetaRegion(encodedRegionName)) {
2252           controller.setPriority(TableName.META_TABLE_NAME);
2253         }
2254 
2255         try {
2256           MoveRegionRequest request =
2257               RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
2258           master.moveRegion(controller, request);
2259         } catch (DeserializationException de) {
2260           LOG.error("Could not parse destination server name: " + de);
2261           throw new ServiceException(new DoNotRetryIOException(de));
2262         }
2263         return null;
2264       }
2265     });
2266   }
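
  // Editorial usage sketch (not part of the original class): move a region, identified
  // by its encoded name, to a specific regionserver. Passing an empty byte array as
  // the destination lets the master pick a server at random, per the Javadoc above.
  // Both values below are placeholders.
  private static void exampleMoveRegion(Admin admin) throws IOException {
    admin.move(Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"),
        Bytes.toBytes("host187.example.com,60020,1289493121758"));
  }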
2267 
2268   private boolean isMetaRegion(final byte[] regionName) {
2269     return Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2270         || Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
2271   }
2272 
2273   /**
2274    * @param regionName
2275    *          Region name to assign.
2276    * @throws MasterNotRunningException
2277    * @throws ZooKeeperConnectionException
2278    * @throws IOException
2279    */
2280   @Override
2281   public void assign(final byte[] regionName) throws MasterNotRunningException,
2282       ZooKeeperConnectionException, IOException {
2283     final byte[] toBeAssigned = getRegionName(regionName);
2284     executeCallable(new MasterCallable<Void>(getConnection()) {
2285       @Override
2286       public Void call(int callTimeout) throws ServiceException {
2287         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2288         controller.setCallTimeout(callTimeout);
2289         // Hard to know the table name, at least check if meta
2290         if (isMetaRegion(regionName)) {
2291           controller.setPriority(TableName.META_TABLE_NAME);
2292         }
2293 
2294         AssignRegionRequest request =
2295           RequestConverter.buildAssignRegionRequest(toBeAssigned);
2296         master.assignRegion(controller,request);
2297         return null;
2298       }
2299     });
2300   }
2301 
2302   /**
2303    * Unassign a region from current hosting regionserver.  Region will then be
2304    * assigned to a regionserver chosen at random.  Region could be reassigned
2305    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
2306    * to control the region movement.
2307    * @param regionName Region to unassign. Will clear any existing RegionPlan
2308    * if one found.
2309    * @param force If true, force unassign (Will remove region from
2310    * regions-in-transition too if present. If results in double assignment
2311    * use hbck -fix to resolve. To be used by experts).
2312    * @throws MasterNotRunningException
2313    * @throws ZooKeeperConnectionException
2314    * @throws IOException
2315    */
2316   @Override
2317   public void unassign(final byte [] regionName, final boolean force)
2318   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
2319     final byte[] toBeUnassigned = getRegionName(regionName);
2320     executeCallable(new MasterCallable<Void>(getConnection()) {
2321       @Override
2322       public Void call(int callTimeout) throws ServiceException {
2323         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2324         controller.setCallTimeout(callTimeout);
2325         // Hard to know the table name, at least check if meta
2326         if (isMetaRegion(regionName)) {
2327           controller.setPriority(TableName.META_TABLE_NAME);
2328         }
2329         UnassignRegionRequest request =
2330           RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
2331         master.unassignRegion(controller, request);
2332         return null;
2333       }
2334     });
2335   }
2336 
2337   /**
2338    * Offline specified region from master's in-memory state. It will not attempt to reassign the
2339    * region as in unassign. This API can be used when a region is not served by any region server
2340    * but is still online as per the Master's in-memory state. If this API is incorrectly used on an
2341    * active region then the master will lose track of that region.
2342    *
2343    * This is a special method that should be used by experts or hbck.
2344    *
2345    * @param regionName
2346    *          Region to offline.
2347    * @throws IOException
2348    */
2349   @Override
2350   public void offline(final byte [] regionName)
2351   throws IOException {
2352     executeCallable(new MasterCallable<Void>(getConnection()) {
2353       @Override
2354       public Void call(int callTimeout) throws ServiceException {
2355         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2356         controller.setCallTimeout(callTimeout);
2357         // Hard to know the table name, at least check if meta
2358         if (isMetaRegion(regionName)) {
2359           controller.setPriority(TableName.META_TABLE_NAME);
2360         }
2361         master.offlineRegion(controller, RequestConverter.buildOfflineRegionRequest(regionName));
2362         return null;
2363       }
2364     });
2365   }
2366 
2367   /**
2368    * Turn the load balancer on or off.
2369    * @param on If true, enable balancer. If false, disable balancer.
2370    * @param synchronous If true, wait until any outstanding balance() call has returned.
2371    * @return Previous balancer value
2372    */
2373   @Override
2374   public boolean setBalancerRunning(final boolean on, final boolean synchronous)
2375   throws IOException {
2376     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2377       @Override
2378       public Boolean call(int callTimeout) throws ServiceException {
2379         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2380         controller.setCallTimeout(callTimeout);
2381 
2382         SetBalancerRunningRequest req =
2383             RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
2384         return master.setBalancerRunning(controller, req).getPrevBalanceValue();
2385       }
2386     });
2387   }
2388 
2389   /**
2390    * Invoke the balancer.  Will run the balancer and if there are regions to move, it will
2391    * go ahead and do the reassignments.  Can NOT run for various reasons.  Check
2392    * logs.
2393    * @return True if balancer ran, false otherwise.
2394    */
2395   @Override
2396   public boolean balancer() throws IOException {
2397     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2398       @Override
2399       public Boolean call(int callTimeout) throws ServiceException {
2400         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2401         controller.setCallTimeout(callTimeout);
2402         return master.balance(controller, RequestConverter.buildBalanceRequest(false))
2403             .getBalancerRan();
2404       }
2405     });
2406   }
2407 
2408   @Override
2409   public boolean balancer(final boolean force) throws IOException {
2410     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2411       @Override
2412       public Boolean call(int callTimeout) throws ServiceException {
2413         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2414         controller.setCallTimeout(callTimeout);
2415         return master.balance(controller, RequestConverter.buildBalanceRequest(force))
2416             .getBalancerRan();
2417       }
2418     });
2419   }
2420 
2421   /**
2422    * Query the state of the balancer from the Master. It's not a guarantee that the balancer is
2423    * actually running this very moment, but that it will run.
2424    *
2425    * @return True if the balancer is enabled, false otherwise.
2426    */
2427   @Override
2428   public boolean isBalancerEnabled() throws IOException {
2429     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2430       @Override
2431       public Boolean call(int callTimeout) throws ServiceException {
2432         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2433         controller.setCallTimeout(callTimeout);
2434 
2435         return master.isBalancerEnabled(controller,
2436           RequestConverter.buildIsBalancerEnabledRequest()).getEnabled();
2437       }
2438     });
2439   }
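
  // Editorial usage sketch (not part of the original class): temporarily switch the
  // balancer off (waiting for any in-flight balance() call to finish), do some
  // maintenance, then restore its previous state.
  private static void exampleBalancerToggle(Admin admin) throws IOException {
    boolean wasEnabled = admin.setBalancerRunning(false, true);
    try {
      // ... perform maintenance that should not race with region moves ...
    } finally {
      admin.setBalancerRunning(wasEnabled, false);
    }
  }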
2440 
2441   /**
2442    * Invoke region normalizer. Can NOT run for various reasons.  Check logs.
2443    *
2444    * @return True if region normalizer ran, false otherwise.
2445    */
2446   @Override
2447   public boolean normalize() throws IOException {
2448     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2449       @Override
2450       public Boolean call(int callTimeout) throws ServiceException {
2451         return master.normalize(null,
2452           RequestConverter.buildNormalizeRequest()).getNormalizerRan();
2453       }
2454     });
2455   }
2456 
2457   /**
2458    * Query the current state of the region normalizer
2459    *
2460    * @return true if region normalizer is enabled, false otherwise.
2461    */
2462   public boolean isNormalizerEnabled() throws IOException {
2463     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2464       @Override
2465       public Boolean call(int callTimeout) throws ServiceException {
2466         return master.isNormalizerEnabled(null,
2467           RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled();
2468       }
2469     });
2470   }
2471 
2472   /**
2473    * Turn region normalizer on or off.
2474    *
2475    * @return Previous normalizer value
2476    */
2477   public boolean setNormalizerRunning(final boolean on) throws IOException {
2478     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2479       @Override
2480       public Boolean call(int callTimeout) throws ServiceException {
2481         SetNormalizerRunningRequest req =
2482           RequestConverter.buildSetNormalizerRunningRequest(on);
2483         return master.setNormalizerRunning(null, req).getPrevNormalizerValue();
2484       }
2485     });
2486   }
2487 
2488   /**
2489    * Enable/Disable the catalog janitor
2490    * @param enable if true enables the catalog janitor
2491    * @return the previous state
2492    * @throws MasterNotRunningException
2493    */
2494   @Override
2495   public boolean enableCatalogJanitor(final boolean enable)
2496       throws IOException {
2497     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2498       @Override
2499       public Boolean call(int callTimeout) throws ServiceException {
2500         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2501         controller.setCallTimeout(callTimeout);
2502 
2503         return master.enableCatalogJanitor(controller,
2504           RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
2505       }
2506     });
2507   }
2508 
2509   /**
2510    * Ask for a scan of the catalog table
2511    * @return the number of entries cleaned
2512    * @throws MasterNotRunningException
2513    */
2514   @Override
2515   public int runCatalogScan() throws IOException {
2516     return executeCallable(new MasterCallable<Integer>(getConnection()) {
2517       @Override
2518       public Integer call(int callTimeout) throws ServiceException {
2519         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2520         controller.setCallTimeout(callTimeout);
2521 
2522         return master.runCatalogScan(controller,
2523           RequestConverter.buildCatalogScanRequest()).getScanResult();
2524       }
2525     });
2526   }
2527 
2528   /**
2529    * Query the current state of the catalog janitor (enabled/disabled).
2530    * @throws org.apache.hadoop.hbase.MasterNotRunningException
2531    */
2532   @Override
2533   public boolean isCatalogJanitorEnabled() throws IOException {
2534     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2535       @Override
2536       public Boolean call(int callTimeout) throws ServiceException {
2537         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2538         controller.setCallTimeout(callTimeout);
2539 
2540         return master.isCatalogJanitorEnabled(controller,
2541           RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
2542       }
2543     });
2544   }
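
  // Editorial usage sketch (not part of the original class): make sure the catalog
  // janitor is enabled and trigger a catalog scan, reporting how many garbage entries
  // (e.g. parents of split regions) were cleaned.
  private static void exampleCatalogJanitor(Admin admin) throws IOException {
    if (!admin.isCatalogJanitorEnabled()) {
      admin.enableCatalogJanitor(true);
    }
    int cleaned = admin.runCatalogScan();
    System.out.println("Catalog scan cleaned " + cleaned + " entries");
  }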
2545 
2546   /**
2547    * Merge two regions. Asynchronous operation.
2548    * @param encodedNameOfRegionA encoded name of region a
2549    * @param encodedNameOfRegionB encoded name of region b
2550    * @param forcible true if do a compulsory merge, otherwise we will only merge
2551    *          two adjacent regions
2552    * @throws IOException
2553    */
2554   @Override
2555   public void mergeRegions(final byte[] encodedNameOfRegionA,
2556       final byte[] encodedNameOfRegionB, final boolean forcible)
2557       throws IOException {
2558     Pair<HRegionInfo, ServerName> pair = getRegion(encodedNameOfRegionA);
2559     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2560       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2561     pair = getRegion(encodedNameOfRegionB);
2562     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2563       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2564     executeCallable(new MasterCallable<Void>(getConnection()) {
2565       @Override
2566       public Void call(int callTimeout) throws ServiceException {
2567         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2568         controller.setCallTimeout(callTimeout);
2569 
2570         try {
2571           DispatchMergingRegionsRequest request = RequestConverter
2572               .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
2573                 encodedNameOfRegionB, forcible);
2574           master.dispatchMergingRegions(controller, request);
2575         } catch (DeserializationException de) {
2576           LOG.error("Could not parse destination server name: " + de);
2577         }
2578         return null;
2579       }
2580     });
2581   }
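
  // Editorial usage sketch (not part of the original class): merge two adjacent
  // regions by their encoded names. forcible=false restricts the merge to adjacent
  // regions, as described in the Javadoc above. The encoded names are placeholders.
  private static void exampleMergeRegions(Admin admin) throws IOException {
    admin.mergeRegions(Bytes.toBytes("d6f69f17b4d1a2c1b2b2e4e0a6b0c3d4"),
        Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"), false);
  }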
2582 
2583   /**
2584    * {@inheritDoc}
2585    */
2586   @Override
2587   public void split(final TableName tableName)
2588     throws IOException {
2589     split(tableName, null);
2590   }
2591 
2592   /**
2593    * {@inheritDoc}
2594    */
2595   @Override
2596   public void splitRegion(final byte[] regionName)
2597     throws IOException {
2598     splitRegion(regionName, null);
2599   }
2600 
2601   /**
2602    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2603    * (byte[])} instead.
2604    */
2605   @Deprecated
2606   public void split(final String tableNameOrRegionName)
2607   throws IOException, InterruptedException {
2608     split(Bytes.toBytes(tableNameOrRegionName));
2609   }
2610 
2611   /**
2612    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2613    * (byte[])} instead.
2614    */
2615   @Deprecated
2616   public void split(final byte[] tableNameOrRegionName)
2617   throws IOException, InterruptedException {
2618     split(tableNameOrRegionName, null);
2619   }
2620 
2621   /**
2622    * {@inheritDoc}
2623    */
2624   @Override
2625   public void split(final TableName tableName, final byte [] splitPoint)
2626   throws IOException {
2627     ZooKeeperWatcher zookeeper = null;
2628     try {
2629       checkTableExists(tableName);
2630       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2631         new ThrowableAbortable());
2632       List<Pair<HRegionInfo, ServerName>> pairs =
2633         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2634       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2635         // May not be a server for a particular row
2636         if (pair.getSecond() == null) continue;
2637         HRegionInfo r = pair.getFirst();
2638         // check for parents
2639         if (r.isSplitParent()) continue;
2640         // if a split point given, only split that particular region
2641         if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
2642            (splitPoint != null && !r.containsRow(splitPoint))) continue;
2643         // call out to region server to do split now
2644         split(pair.getSecond(), pair.getFirst(), splitPoint);
2645       }
2646     } finally {
2647       if (zookeeper != null) {
2648         zookeeper.close();
2649       }
2650     }
2651   }
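
  // Editorial usage sketch (not part of the original class): splitting every region of a table
  // that contains a given row. "t1" and "row-500000" are placeholder values.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //        Admin admin = conn.getAdmin()) {
  //     admin.split(TableName.valueOf("t1"), Bytes.toBytes("row-500000"));
  //   }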
2652 
2653   /**
2654    * {@inheritDoc}
2655    */
2656   @Override
2657   public void splitRegion(final byte[] regionName, final byte [] splitPoint)
2658   throws IOException {
2659     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2660     if (regionServerPair == null) {
2661       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2662     }
2663     if (regionServerPair.getFirst() != null &&
2664         regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
2665       throw new IllegalArgumentException("Can't split replicas directly. "
2666           + "Replicas are auto-split when their primary is split.");
2667     }
2668     if (regionServerPair.getSecond() == null) {
2669       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2670     }
2671     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
2672   }
2673 
2674   /**
2675    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2676    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2677    */
2678   @Deprecated
2679   public void split(final String tableNameOrRegionName,
2680     final String splitPoint) throws IOException {
2681     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
2682   }
2683 
2684   /**
2685    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2686    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2687    */
2688   @Deprecated
2689   public void split(final byte[] tableNameOrRegionName,
2690       final byte [] splitPoint) throws IOException {
2691     try {
2692       splitRegion(tableNameOrRegionName, splitPoint);
2693     } catch (IllegalArgumentException e) {
2694       // Bad region, try table
2695       split(TableName.valueOf(tableNameOrRegionName), splitPoint);
2696     }
2697   }
2698 
2699   @VisibleForTesting
2700   public void split(final ServerName sn, final HRegionInfo hri,
2701       byte[] splitPoint) throws IOException {
2702     if (hri.getStartKey() != null && splitPoint != null &&
2703          Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
2704        throw new IOException("Split key cannot equal the region start key");
2705     }
2706     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2707     controller.setPriority(hri.getTable());
2708 
2709     // TODO: this does not do retries, it should. Set priority and timeout in controller
2710     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2711     ProtobufUtil.split(controller, admin, hri, splitPoint);
2712   }
2713 
2714   
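  /**
   * Start a backup of the given tables without waiting for the backup procedure to complete.
   * @param userRequest backup request describing the backup type, tables, target root directory,
   *          number of workers and bandwidth limit
   * @return a future that can be used to wait for the backup procedure to finish
   * @throws IOException if a requested table does not exist or a remote or network exception occurs
   */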
2715   Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException {
2716     BackupClientUtil.checkTargetDir(userRequest.getTargetRootDir(), conf);
2717     if (userRequest.getTableList() != null) {
2718       for (TableName table : userRequest.getTableList()) {
2719         if (!tableExists(table)) {
2720           throw new DoNotRetryIOException(table + " does not exist");
2721         }
2722       }
2723     }
2724     BackupTablesResponse response = executeCallable(
2725       new MasterCallable<BackupTablesResponse>(getConnection()) {
2726         @Override
2727         public BackupTablesResponse call(int callTimeout) throws ServiceException {
2728           BackupTablesRequest request = RequestConverter.buildBackupTablesRequest(
2729             userRequest.getBackupType(), userRequest.getTableList(), userRequest.getTargetRootDir(),
2730             userRequest.getWorkers(), userRequest.getBandwidth());
2731           return master.backupTables(null, request);
2732         }
2733       });
2734     return new TableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response);
2735   }
2736 
2737   /**
2738    * Do a get with a timeout against the passed in <code>future</code>.
2739    */
2740   private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units)
2741   throws IOException {
2742     try {
2743       // TODO: how long should we wait? Spin forever?
2744       return future.get(timeout, units);
2745     } catch (InterruptedException e) {
2746       throw new InterruptedIOException("Interrupted while waiting on " + future);
2747     } catch (TimeoutException e) {
2748       throw new TimeoutIOException(e);
2749     } catch (ExecutionException e) {
2750       if (e.getCause() instanceof IOException) {
2751         throw (IOException)e.getCause();
2752       } else {
2753         throw new IOException(e.getCause());
2754       }
2755     }
2756   }
2757 
2758   
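  /**
   * Take a backup of the given tables, blocking until the backup procedure completes or the
   * configured backup wait timeout expires.
   * @param userRequest backup request describing the backup type, tables, target root directory,
   *          number of workers and bandwidth limit
   * @return the result of the backup procedure
   * @throws IOException if the backup fails or a remote or network exception occurs
   */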
2759   public String backupTables(final BackupRequest userRequest) throws IOException {
2760     return get(
2761       backupTablesAsync(userRequest),
2762       backupWaitTimeout,
2763       TimeUnit.SECONDS);
2764   }
2765 
2766   public static class TableBackupFuture extends TableFuture<String> {
2767     String backupId;
2768     public TableBackupFuture(final HBaseAdmin admin, final TableName tableName,
2769         final BackupTablesResponse response) {
2770       super(admin, tableName,
2771           (response != null && response.hasProcId()) ? response.getProcId() : null);
2772       backupId = response.getBackupId();
2773     }
2774 
2775     String getBackupId() {
2776       return backupId;
2777     }
2778 
2779     @Override
2780     public String getOperationType() {
2781       return "BACKUP";
2782     }
2783 
2784     @Override
2785     protected String convertResult(final GetProcedureResultResponse response) throws IOException {
2786       if (response.hasException()) {
2787         throw ForeignExceptionUtil.toIOException(response.getException());
2788       }
2789       ByteString result = response.getResult();
2790       if (result == null) return null;
2791       return Bytes.toStringBinary(result.toByteArray());
2792     }
2793 
2794     @Override
2795     protected String postOperationResult(final String result,
2796         final long deadlineTs) throws IOException, TimeoutException {
2797       return result;
2798     }
2799   }
2800 
2801   /**
2802    * Modify an existing table, more IRB friendly version.
2803    * Asynchronous operation.  This means that it may be a while before your
2804    * schema change has propagated to all regions of the table.
2805    *
2806    * @param tableName name of table.
2807    * @param htd modified description of the table
2808    * @throws IOException if a remote or network exception occurs
2809    */
2810   @Override
2811   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
2812   throws IOException {
2813     if (!tableName.equals(htd.getTableName())) {
2814       throw new IllegalArgumentException("the specified table name '" + tableName +
2815         "' doesn't match the table name in the HTableDescriptor: " + htd.getTableName());
2816     }
2817 
2818     executeCallable(new MasterCallable<Void>(getConnection()) {
2819       @Override
2820       public Void call(int callTimeout) throws ServiceException {
2821         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2822         controller.setCallTimeout(callTimeout);
2823         controller.setPriority(tableName);
2824         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
2825                 tableName, htd, ng.getNonceGroup(), ng.newNonce());
2826         master.modifyTable(controller, request);
2827         return null;
2828       }
2829     });
2830   }
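
  // Editorial usage sketch (not part of the original class): fetching the current descriptor,
  // changing one attribute and pushing it back with modifyTable. The operation is asynchronous,
  // so the new schema may take a while to reach every region. "t1" is a placeholder table name.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //        Admin admin = conn.getAdmin()) {
  //     TableName tn = TableName.valueOf("t1");
  //     HTableDescriptor htd = admin.getTableDescriptor(tn);
  //     htd.setMaxFileSize(10L * 1024 * 1024 * 1024);            // 10 GB before a region splits
  //     admin.modifyTable(tn, htd);
  //   }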
2831 
2832   public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
2833   throws IOException {
2834     modifyTable(TableName.valueOf(tableName), htd);
2835   }
2836 
2837   public void modifyTable(final String tableName, final HTableDescriptor htd)
2838   throws IOException {
2839     modifyTable(TableName.valueOf(tableName), htd);
2840   }
2841 
2842   /**
2843    * @param regionName Name of a region.
2844    * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
2845    *  a verified region name (we call
2846    *  {@link MetaTableAccessor#getRegion(HConnection, byte[])});
2847    *  otherwise null.
2848    * @throws IllegalArgumentException if <code>regionName</code> is null
2849    * @throws IOException if a remote or network exception occurs
2850    */
2851   Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
2852     if (regionName == null) {
2853       throw new IllegalArgumentException("Pass a table name or region name");
2854     }
2855     Pair<HRegionInfo, ServerName> pair =
2856       MetaTableAccessor.getRegion(connection, regionName);
2857     if (pair == null) {
2858       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
2859         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
2860       final String encodedName = Bytes.toString(regionName);
2861       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
2862         @Override
2863         public boolean processRow(Result data) throws IOException {
2864           HRegionInfo info = HRegionInfo.getHRegionInfo(data);
2865           if (info == null) {
2866             LOG.warn("No serialized HRegionInfo in " + data);
2867             return true;
2868           }
2869           RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
2870           boolean matched = false;
2871           ServerName sn = null;
2872           for (HRegionLocation h : rl.getRegionLocations()) {
2873             if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
2874               sn = h.getServerName();
2875               info = h.getRegionInfo();
2876               matched = true;
2877             }
2878           }
2879           if (!matched) return true;
2880           result.set(new Pair<HRegionInfo, ServerName>(info, sn));
2881           return false; // found the region, stop
2882         }
2883       };
2884 
2885       MetaScanner.metaScan(connection, visitor, null);
2886       pair = result.get();
2887     }
2888     return pair;
2889   }
2890 
2891   /**
2892    * If the input is a region name, it is returned as is. If it's an
2893    * encoded region name, the corresponding region is found from meta
2894    * and its region name is returned. If we can't find any region in
2895    * meta matching the input as either region name or encoded region
2896    * name, the input is returned as is. We don't throw unknown
2897    * region exception.
2898    */
2899   private byte[] getRegionName(
2900       final byte[] regionNameOrEncodedRegionName) throws IOException {
2901     if (Bytes.equals(regionNameOrEncodedRegionName,
2902         HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2903           || Bytes.equals(regionNameOrEncodedRegionName,
2904             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
2905       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2906     }
2907     byte[] tmp = regionNameOrEncodedRegionName;
2908     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2909     if (regionServerPair != null && regionServerPair.getFirst() != null) {
2910       tmp = regionServerPair.getFirst().getRegionName();
2911     }
2912     return tmp;
2913   }
2914 
2915   /**
2916    * Check if table exists or not
2917    * @param tableName Name of a table.
2918    * @return tableName instance
2919    * @throws IOException if a remote or network exception occurs.
2920    * @throws TableNotFoundException if table does not exist.
2921    */
2922   private TableName checkTableExists(final TableName tableName)
2923       throws IOException {
2924     if (!MetaTableAccessor.tableExists(connection, tableName)) {
2925       throw new TableNotFoundException(tableName);
2926     }
2927     return tableName;
2928   }
2929 
2930   /**
2931    * Shuts down the HBase cluster
2932    * @throws IOException if a remote or network exception occurs
2933    */
2934   @Override
2935   public synchronized void shutdown() throws IOException {
2936     executeCallable(new MasterCallable<Void>(getConnection()) {
2937       @Override
2938       public Void call(int callTimeout) throws ServiceException {
2939         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2940         controller.setCallTimeout(callTimeout);
2941         controller.setPriority(HConstants.HIGH_QOS);
2942         master.shutdown(controller, ShutdownRequest.newBuilder().build());
2943         return null;
2944       }
2945     });
2946   }
2947 
2948   /**
2949    * Shuts down the current HBase master only.
2950    * Does not shutdown the cluster.
2951    * @see #shutdown()
2952    * @throws IOException if a remote or network exception occurs
2953    */
2954   @Override
2955   public synchronized void stopMaster() throws IOException {
2956     executeCallable(new MasterCallable<Void>(getConnection()) {
2957       @Override
2958       public Void call(int callTimeout) throws ServiceException {
2959         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2960         controller.setCallTimeout(callTimeout);
2961         controller.setPriority(HConstants.HIGH_QOS);
2962         master.stopMaster(controller, StopMasterRequest.newBuilder().build());
2963         return null;
2964       }
2965     });
2966   }
2967 
2968   /**
2969    * Stop the designated regionserver
2970    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2971    * <code>example.org:1234</code>
2972    * @throws IOException if a remote or network exception occurs
2973    */
2974   @Override
2975   public synchronized void stopRegionServer(final String hostnamePort)
2976   throws IOException {
2977     String hostname = Addressing.parseHostname(hostnamePort);
2978     int port = Addressing.parsePort(hostnamePort);
2979     AdminService.BlockingInterface admin =
2980       this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2981     StopServerRequest request = RequestConverter.buildStopServerRequest(
2982       "Called by admin client " + this.connection.toString());
2983     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2984 
2985     controller.setPriority(HConstants.HIGH_QOS);
2986     try {
2987       // TODO: this does not do retries, it should. Set priority and timeout in controller
2988       admin.stopServer(controller, request);
2989     } catch (ServiceException se) {
2990       throw ProtobufUtil.getRemoteException(se);
2991     }
2992   }
2993 
2994 
2995   /**
2996    * @return true if the master is in maintenance mode
2997    * @throws IOException if a remote or network exception occurs
2998    */
2999   @Override
3000   public boolean isMasterInMaintenanceMode() throws IOException {
3001     return executeCallable(new MasterCallable<IsInMaintenanceModeResponse>(getConnection()) {
3002       @Override
3003       public IsInMaintenanceModeResponse call(int callTimeout) throws ServiceException {
3004         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3005         controller.setCallTimeout(callTimeout);
3006         return master.isMasterInMaintenanceMode(
3007           controller, IsInMaintenanceModeRequest.newBuilder().build());
3008       }
3009     }).getInMaintenanceMode();
3010   }
3011 
3012   @Override
3013   public ClusterStatus getClusterStatus() throws IOException {
3014     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
3015       @Override
3016       public ClusterStatus call(int callTimeout) throws ServiceException {
3017         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3018         controller.setCallTimeout(callTimeout);
3019         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
3020         return ClusterStatus.convert(master.getClusterStatus(controller, req).getClusterStatus());
3021       }
3022     });
3023   }
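
  // Editorial usage sketch (not part of the original class): reading a few aggregate metrics from
  // the ClusterStatus returned above. getServers(), getRegionsCount() and getAverageLoad() are
  // standard ClusterStatus accessors.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //        Admin admin = conn.getAdmin()) {
  //     ClusterStatus status = admin.getClusterStatus();
  //     int liveServers = status.getServers().size();
  //     int regionCount = status.getRegionsCount();
  //     double averageLoad = status.getAverageLoad();
  //   }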
3024 
3025   /**
3026    * @return Configuration used by the instance.
3027    */
3028   @Override
3029   public Configuration getConfiguration() {
3030     return this.conf;
3031   }
3032 
3033   /**
3034    * Create a new namespace
3035    * @param descriptor descriptor which describes the new namespace
3036    * @throws IOException
3037    */
3038   @Override
3039   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
3040     executeCallable(new MasterCallable<Void>(getConnection()) {
3041       @Override
3042       public Void call(int callTimeout) throws Exception {
3043         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3044         controller.setCallTimeout(callTimeout);
3045         // TODO: set priority based on NS?
3046         master.createNamespace(controller,
3047           CreateNamespaceRequest.newBuilder()
3048             .setNamespaceDescriptor(ProtobufUtil
3049               .toProtoNamespaceDescriptor(descriptor)).build()
3050         );
3051         return null;
3052       }
3053     });
3054   }
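
  // Editorial usage sketch (not part of the original class): creating a namespace and listing the
  // namespaces that exist afterwards. The namespace name "analytics" is a placeholder.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //        Admin admin = conn.getAdmin()) {
  //     admin.createNamespace(NamespaceDescriptor.create("analytics").build());
  //     for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
  //       System.out.println(ns.getName());
  //     }
  //   }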
3055 
3056   /**
3057    * Modify an existing namespace
3058    * @param descriptor descriptor which describes the new namespace
3059    * @throws IOException
3060    */
3061   @Override
3062   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
3063     executeCallable(new MasterCallable<Void>(getConnection()) {
3064       @Override
3065       public Void call(int callTimeout) throws Exception {
3066         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3067         controller.setCallTimeout(callTimeout);
3068         master.modifyNamespace(controller, ModifyNamespaceRequest.newBuilder().
3069           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
3070         return null;
3071       }
3072     });
3073   }
3074 
3075   /**
3076    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
3077    * @param name namespace name
3078    * @throws IOException
3079    */
3080   @Override
3081   public void deleteNamespace(final String name) throws IOException {
3082     executeCallable(new MasterCallable<Void>(getConnection()) {
3083       @Override
3084       public Void call(int callTimeout) throws Exception {
3085         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3086         controller.setCallTimeout(callTimeout);
3087         master.deleteNamespace(controller, DeleteNamespaceRequest.newBuilder().
3088           setNamespaceName(name).build());
3089         return null;
3090       }
3091     });
3092   }
3093 
3094   /**
3095    * Get a namespace descriptor by name
3096    * @param name name of namespace descriptor
3097    * @return A descriptor
3098    * @throws IOException
3099    */
3100   @Override
3101   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
3102     return
3103         executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
3104           @Override
3105           public NamespaceDescriptor call(int callTimeout) throws Exception {
3106             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3107             controller.setCallTimeout(callTimeout);
3108             return ProtobufUtil.toNamespaceDescriptor(
3109               master.getNamespaceDescriptor(controller, GetNamespaceDescriptorRequest.newBuilder().
3110                 setNamespaceName(name).build()).getNamespaceDescriptor());
3111           }
3112         });
3113   }
3114 
3115   /**
3116    * List available namespace descriptors
3117    * @return List of descriptors
3118    * @throws IOException
3119    */
3120   @Override
3121   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
3122     return
3123         executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
3124           @Override
3125           public NamespaceDescriptor[] call(int callTimeout) throws Exception {
3126             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3127             controller.setCallTimeout(callTimeout);
3128             List<HBaseProtos.NamespaceDescriptor> list =
3129                 master.listNamespaceDescriptors(controller,
3130                   ListNamespaceDescriptorsRequest.newBuilder().build())
3131                 .getNamespaceDescriptorList();
3132             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
3133             for(int i = 0; i < list.size(); i++) {
3134               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
3135             }
3136             return res;
3137           }
3138         });
3139   }
3140 
3141   /**
3142    * List procedures
3143    * @return procedure list
3144    * @throws IOException
3145    */
3146   @Override
3147   public ProcedureInfo[] listProcedures() throws IOException {
3148     return
3149         executeCallable(new MasterCallable<ProcedureInfo[]>(getConnection()) {
3150           @Override
3151           public ProcedureInfo[] call(int callTimeout) throws Exception {
3152             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3153             controller.setCallTimeout(callTimeout);
3154             List<ProcedureProtos.Procedure> procList = master.listProcedures(
3155               controller, ListProceduresRequest.newBuilder().build()).getProcedureList();
3156             ProcedureInfo[] procInfoList = new ProcedureInfo[procList.size()];
3157             for (int i = 0; i < procList.size(); i++) {
3158               procInfoList[i] = ProcedureInfo.convert(procList.get(i));
3159             }
3160             return procInfoList;
3161           }
3162         });
3163   }
3164 
3165   /**
3166    * Get list of table descriptors by namespace
3167    * @param name namespace name
3168    * @return the table descriptors of all tables in the namespace
3169    * @throws IOException
3170    */
3171   @Override
3172   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
3173     return
3174         executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3175           @Override
3176           public HTableDescriptor[] call(int callTimeout) throws Exception {
3177             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3178             controller.setCallTimeout(callTimeout);
3179             List<TableSchema> list =
3180                 master.listTableDescriptorsByNamespace(controller,
3181                   ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name)
3182                   .build()).getTableSchemaList();
3183             HTableDescriptor[] res = new HTableDescriptor[list.size()];
3184             for (int i = 0; i < list.size(); i++) {
3185               res[i] = HTableDescriptor.convert(list.get(i));
3187             }
3188             return res;
3189           }
3190         });
3191   }
3192 
3193   /**
3194    * Get list of table names by namespace
3195    * @param name namespace name
3196    * @return The list of table names in the namespace
3197    * @throws IOException
3198    */
3199   @Override
3200   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
3201     return
3202         executeCallable(new MasterCallable<TableName[]>(getConnection()) {
3203           @Override
3204           public TableName[] call(int callTimeout) throws Exception {
3205             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3206             controller.setCallTimeout(callTimeout);
3207             List<TableProtos.TableName> tableNames =
3208               master.listTableNamesByNamespace(controller, ListTableNamesByNamespaceRequest.
3209                 newBuilder().setNamespaceName(name).build())
3210                 .getTableNameList();
3211             TableName[] result = new TableName[tableNames.size()];
3212             for (int i = 0; i < tableNames.size(); i++) {
3213               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
3214             }
3215             return result;
3216           }
3217         });
3218   }
3219 
3220   /**
3221    * Check to see if HBase is running. Throw an exception if not.
3222    * @param conf system configuration
3223    * @throws MasterNotRunningException if the master is not running
3224    * @throws ZooKeeperConnectionException if unable to connect to zookeeper
3225    */
3226   // Used by tests and by the Merge tool. Merge tool uses it to figure if HBase is up or not.
3227   public static void checkHBaseAvailable(Configuration conf)
3228   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
3229     Configuration copyOfConf = HBaseConfiguration.create(conf);
3230     // We set it to make it fail as soon as possible if HBase is not available
3231     copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
3232     copyOfConf.setInt("zookeeper.recovery.retry", 0);
3233     try (ClusterConnection connection =
3234         (ClusterConnection)ConnectionFactory.createConnection(copyOfConf)) {
3235         // Check ZK first.
3236         // If the connection exists, we may have a connection to ZK that does not work anymore
3237         ZooKeeperKeepAliveConnection zkw = null;
3238         try {
3239           // This is NASTY. FIX!!!! Dependent on internal implementation! TODO
3240           zkw = ((ConnectionManager.HConnectionImplementation)connection).
3241             getKeepAliveZooKeeperWatcher();
3242           zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);
3243         } catch (IOException e) {
3244           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3245         } catch (InterruptedException e) {
3246           throw (InterruptedIOException)
3247             new InterruptedIOException("Can't connect to ZooKeeper").initCause(e);
3248         } catch (KeeperException e) {
3249           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3250         } finally {
3251           if (zkw != null) {
3252             zkw.close();
3253           }
3254         }
3255       connection.isMasterRunning();
3256     }
3257   }
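
  // Editorial usage sketch (not part of the original class): probing cluster availability before
  // doing real work. Retries are capped inside checkHBaseAvailable, so this fails fast.
  //
  //   try {
  //     HBaseAdmin.checkHBaseAvailable(conf);
  //     // cluster is reachable
  //   } catch (Exception e) {
  //     // master not running, ZooKeeper unreachable, or another connectivity problem
  //   }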
3258 
3259   /**
3260    * get the regions of a given table.
3261    *
3262    * @param tableName the name of the table
3263    * @return Ordered list of {@link HRegionInfo}.
3264    * @throws IOException
3265    */
3266   @Override
3267   public List<HRegionInfo> getTableRegions(final TableName tableName)
3268   throws IOException {
3269     ZooKeeperWatcher zookeeper =
3270       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3271         new ThrowableAbortable());
3272     List<HRegionInfo> regions = null;
3273     try {
3274       regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
3275     } finally {
3276       zookeeper.close();
3277     }
3278     return regions;
3279   }
3280 
3281   public List<HRegionInfo> getTableRegions(final byte[] tableName)
3282   throws IOException {
3283     return getTableRegions(TableName.valueOf(tableName));
3284   }
3285 
3286   @Override
3287   public synchronized void close() throws IOException {
3288     if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
3289       this.connection.close();
3290       this.closed = true;
3291     }
3292   }
3293 
3294   /**
3295    * Get tableDescriptors
3296    * @param tableNames List of table names
3297    * @return HTD[] the table descriptors
3298    * @throws IOException if a remote or network exception occurs
3299    */
3300   @Override
3301   public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames)
3302   throws IOException {
3303     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3304       @Override
3305       public HTableDescriptor[] call(int callTimeout) throws Exception {
3306         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3307         controller.setCallTimeout(callTimeout);
3308         GetTableDescriptorsRequest req =
3309             RequestConverter.buildGetTableDescriptorsRequest(tableNames);
3310         return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
3311       }
3312     });
3313   }
3314 
3315   /**
3316    * Get tableDescriptor
3317    * @param tableName one table name
3318    * @return HTD the HTableDescriptor or null if the table does not exist
3319    * @throws IOException if a remote or network exception occurs
3320    */
3321   private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
3322       throws IOException {
3323     List<TableName> tableNames = new ArrayList<TableName>(1);
3324     tableNames.add(tableName);
3325 
3326     HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
3327 
3328     if (htdl == null || htdl.length == 0) {
3329       return null;
3330     }
3331     } else {
3333     }
3334   }
3335 
3336   /**
3337    * Get tableDescriptors
3338    * @param names List of table names
3339    * @return HTD[] the table descriptors
3340    * @throws IOException if a remote or network exception occurs
3341    */
3342   @Override
3343   public HTableDescriptor[] getTableDescriptors(List<String> names)
3344   throws IOException {
3345     List<TableName> tableNames = new ArrayList<TableName>(names.size());
3346     for(String name : names) {
3347       tableNames.add(TableName.valueOf(name));
3348     }
3349     return getTableDescriptorsByTableName(tableNames);
3350   }
3351 
3352   private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException,
3353       FailedLogCloseException {
3354     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3355     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
3356     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3357 
3358     try {
3359       // TODO: this does not do retries, it should. Set priority and timeout in controller
3360       return admin.rollWALWriter(controller, request);
3361     } catch (ServiceException se) {
3362       throw ProtobufUtil.getRemoteException(se);
3363     }
3364   }
3365 
3366   /**
3367    * Roll the log writer. I.e. when using a file system based write ahead log,
3368    * start writing log messages to a new file.
3369    *
3370    * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
3371    * This method will return as soon as the roll is requested and the return value will
3372    * always be null. Additionally, the named region server may schedule store flushes at the
3373    * request of the wal handling the roll request.
3374    *
3375    * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
3376    * return value may be either null or a list of encoded region names.
3377    *
3378    * @param serverName
3379    *          The servername of the regionserver. A server name is made of host,
3380    *          port and startcode. This is mandatory. Here is an example:
3381    *          <code> host187.example.com,60020,1289493121758</code>
3382    * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
3383    *         clean up some underlying files. null if there's nothing to flush.
3384    * @throws IOException if a remote or network exception occurs
3385    * @throws FailedLogCloseException
3386    * @deprecated use {@link #rollWALWriter(ServerName)}
3387    */
3388   @Deprecated
3389   public synchronized byte[][] rollHLogWriter(String serverName)
3390       throws IOException, FailedLogCloseException {
3391     ServerName sn = ServerName.valueOf(serverName);
3392     final RollWALWriterResponse response = rollWALWriterImpl(sn);
3393     int regionCount = response.getRegionToFlushCount();
3394     if (0 == regionCount) {
3395       return null;
3396     }
3397     byte[][] regionsToFlush = new byte[regionCount][];
3398     for (int i = 0; i < regionCount; i++) {
3399       ByteString region = response.getRegionToFlush(i);
3400       regionsToFlush[i] = region.toByteArray();
3401     }
3402     return regionsToFlush;
3403   }
3404 
3405   @Override
3406   public synchronized void rollWALWriter(ServerName serverName)
3407       throws IOException, FailedLogCloseException {
3408     rollWALWriterImpl(serverName);
3409   }
3410 
3411   @Override
3412   public String[] getMasterCoprocessors() {
3413     try {
3414       return getClusterStatus().getMasterCoprocessors();
3415     } catch (IOException e) {
3416       LOG.error("Could not getClusterStatus()", e);
3417       return null;
3418     }
3419   }
3420 
3421   /**
3422    * {@inheritDoc}
3423    */
3424   @Override
3425   public CompactionState getCompactionState(final TableName tableName, CompactType compactType)
3426   throws IOException {
3427     CompactionState state = CompactionState.NONE;
3428     checkTableExists(tableName);
3429     switch (compactType) {
3430       case MOB:
3431         try {
3432           ServerName master = getMasterAddress();
3433           HRegionInfo info = getMobRegionInfo(tableName);
3434           GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3435                   info.getRegionName(), true);
3436           GetRegionInfoResponse response = this.connection.getAdmin(master)
3437                   .getRegionInfo(null, request);
3438           state = response.getCompactionState();
3439         } catch (ServiceException se) {
3440           throw ProtobufUtil.getRemoteException(se);
3441         }
3442         break;
3443       case NORMAL:
3444         ZooKeeperWatcher zookeeper =
3445         new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3446           new ThrowableAbortable());
3447         try {
3448           checkTableExists(tableName);
3449           List<Pair<HRegionInfo, ServerName>> pairs =
3450               MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
3451           for (Pair<HRegionInfo, ServerName> pair: pairs) {
3452             if (pair.getFirst().isOffline()) continue;
3453             if (pair.getSecond() == null) continue;
3454             try {
3455               ServerName sn = pair.getSecond();
3456               AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3457               GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3458                 pair.getFirst().getRegionName(), true);
3459               GetRegionInfoResponse response = admin.getRegionInfo(null, request);
3460               switch (response.getCompactionState()) {
3461               case MAJOR_AND_MINOR:
3462                 return CompactionState.MAJOR_AND_MINOR;
3463               case MAJOR:
3464                 if (state == CompactionState.MINOR) {
3465                   return CompactionState.MAJOR_AND_MINOR;
3466                 }
3467                 state = CompactionState.MAJOR;
3468                 break;
3469               case MINOR:
3470                 if (state == CompactionState.MAJOR) {
3471                   return CompactionState.MAJOR_AND_MINOR;
3472                 }
3473                 state = CompactionState.MINOR;
3474                 break;
3475               case NONE:
3476               default: // nothing, continue
3477               }
3478             } catch (NotServingRegionException e) {
3479               if (LOG.isDebugEnabled()) {
3480                 LOG.debug("Trying to get compaction state of " +
3481                     pair.getFirst() + ": " +
3482                     StringUtils.stringifyException(e));
3483               }
3484             } catch (RemoteException e) {
3485               if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
3486                 if (LOG.isDebugEnabled()) {
3487                   LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
3488                       + StringUtils.stringifyException(e));
3489                 }
3490               } else {
3491                 throw e;
3492               }
3493             }
3494           }
3495         } catch (ServiceException se) {
3496           throw ProtobufUtil.getRemoteException(se);
3497         } finally {
3498           zookeeper.close();
3499         }
3500         break;
3501     }
3502     return state;
3503   }
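
  // Editorial usage sketch (not part of the original class): waiting for a table to finish
  // compacting. This assumes the single-argument getCompactionState(TableName) overload that
  // checks NORMAL compactions; with only the form above, pass the NORMAL compact type explicitly.
  //
  //   TableName tn = TableName.valueOf("t1");                    // placeholder table name
  //   while (admin.getCompactionState(tn) != CompactionState.NONE) {
  //     Thread.sleep(1000);                                      // caller handles InterruptedException
  //   }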
3504 
3505   /**
3506    * {@inheritDoc}
3507    */
3508   @Override
3509   public CompactionState getCompactionStateForRegion(final byte[] regionName)
3510   throws IOException {
3511     try {
3512       Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
3513       if (regionServerPair == null) {
3514         throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
3515       }
3516       if (regionServerPair.getSecond() == null) {
3517         throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
3518       }
3519       ServerName sn = regionServerPair.getSecond();
3520       AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3521       GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3522         regionServerPair.getFirst().getRegionName(), true);
3523       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3524       // TODO: this does not do retries, it should. Set priority and timeout in controller
3525       GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
3526       return response.getCompactionState();
3527     } catch (ServiceException se) {
3528       throw ProtobufUtil.getRemoteException(se);
3529     }
3530   }
3531 
3532   /**
3533    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3534    * #getCompactionStateForRegion(byte[])} instead.
3535    */
3536   @Deprecated
3537   public CompactionState getCompactionState(final String tableNameOrRegionName)
3538   throws IOException, InterruptedException {
3539     return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
3540   }
3541 
3542   /**
3543    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3544    * #getCompactionStateForRegion(byte[])} instead.
3545    */
3546   @Deprecated
3547   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
3548   throws IOException, InterruptedException {
3549     try {
3550       return getCompactionStateForRegion(tableNameOrRegionName);
3551     } catch (IllegalArgumentException e) {
3552       // Invalid region, try table
3553       return getCompactionState(TableName.valueOf(tableNameOrRegionName));
3554     }
3555   }
3556 
3557   /**
3558    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
3559    * taken. If the table is disabled, an offline snapshot is taken.
3560    * <p>
3561    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3562    * snapshot with the same name (even a different type or with different parameters) will fail with
3563    * a {@link SnapshotCreationException} indicating the duplicate naming.
3564    * <p>
3565    * Snapshot names follow the same naming constraints as tables in HBase. See
3566    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3567    * @param snapshotName name of the snapshot to be created
3568    * @param tableName name of the table for which snapshot is created
3569    * @throws IOException if a remote or network exception occurs
3570    * @throws SnapshotCreationException if snapshot creation failed
3571    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3572    */
3573   @Override
3574   public void snapshot(final String snapshotName,
3575                        final TableName tableName) throws IOException,
3576       SnapshotCreationException, IllegalArgumentException {
3577     snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
3578   }
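
  // Editorial usage sketch (not part of the original class): taking a flush snapshot of an online
  // table. Snapshot names follow the table-name rules, so "t1-snap-20160101" is legal while names
  // containing ':' are not. "t1" is a placeholder table name.
  //
  //   try (Connection conn = ConnectionFactory.createConnection(conf);
  //        Admin admin = conn.getAdmin()) {
  //     admin.snapshot("t1-snap-20160101", TableName.valueOf("t1"));
  //   }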
3579 
3580   public void snapshot(final String snapshotName,
3581                        final String tableName) throws IOException,
3582       SnapshotCreationException, IllegalArgumentException {
3583     snapshot(snapshotName, TableName.valueOf(tableName),
3584         SnapshotDescription.Type.FLUSH);
3585   }
3586 
3587   /**
3588    * Create snapshot for the given table of given flush type.
3589    * <p>
3590    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3591    * snapshot with the same name (even a different type or with different parameters) will fail with
3592    * a {@link SnapshotCreationException} indicating the duplicate naming.
3593    * <p>
3594    * Snapshot names follow the same naming constraints as tables in HBase.
3595    * @param snapshotName name of the snapshot to be created
3596    * @param tableName name of the table for which snapshot is created
3597    * @param flushType the type of snapshot to take; controls whether the memstore is flushed first
3598    * @throws IOException if a remote or network exception occurs
3599    * @throws SnapshotCreationException if snapshot creation failed
3600    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3601    */
3602    public void snapshot(final byte[] snapshotName, final byte[] tableName,
3603                        final SnapshotDescription.Type flushType) throws
3604       IOException, SnapshotCreationException, IllegalArgumentException {
3605       snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
3606   }

  public void snapshot(final String snapshotName,
                       final byte[] tableName) throws IOException,
      SnapshotCreationException, IllegalArgumentException {
    snapshot(snapshotName, TableName.valueOf(tableName),
        SnapshotDescription.Type.FLUSH);
  }

  /**
   * Create a timestamp consistent snapshot for the given table.
   * <p>
   * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
   * snapshot with the same name (even a different type or with different parameters) will fail with
   * a {@link SnapshotCreationException} indicating the duplicate naming.
   * <p>
   * Snapshot names follow the same naming constraints as tables in HBase.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
3623   @Override
3624   public void snapshot(final byte[] snapshotName,
3625                        final TableName tableName) throws IOException,
3626       SnapshotCreationException, IllegalArgumentException {
3627     snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
3628   }
3629 
3630   public void snapshot(final byte[] snapshotName,
3631                        final byte[] tableName) throws IOException,
3632       SnapshotCreationException, IllegalArgumentException {
3633     snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
3634       SnapshotDescription.Type.FLUSH);
3635   }
3636 
3637   /**
3638    * Create typed snapshot of the table.
3639    * <p>
3640    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3641    * snapshot with the same name (even a different type or with different parameters) will fail with
3642    * a {@link SnapshotCreationException} indicating the duplicate naming.
3643    * <p>
3644    * Snapshot names follow the same naming constraints as tables in HBase. See
3645    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3646    * <p>
3647    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
3648    *          snapshots stored on the cluster
3649    * @param tableName name of the table to snapshot
3650    * @param type type of snapshot to take
3651    * @throws IOException if we fail to reach the master
3652    * @throws SnapshotCreationException if snapshot creation failed
3653    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3654    */
3655   @Override
3656   public void snapshot(final String snapshotName,
3657                        final TableName tableName,
3658                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3659       IllegalArgumentException {
3660     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
3661     builder.setTable(tableName.getNameAsString());
3662     builder.setName(snapshotName);
3663     builder.setType(type);
3664     snapshot(builder.build());
3665   }
3666 
3667   public void snapshot(final String snapshotName,
3668                        final String tableName,
3669                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3670       IllegalArgumentException {
3671     snapshot(snapshotName, TableName.valueOf(tableName), type);
3672   }
3673 
3674   public void snapshot(final String snapshotName,
3675                        final byte[] tableName,
3676                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3677       IllegalArgumentException {
3678     snapshot(snapshotName, TableName.valueOf(tableName), type);
3679   }
3680 
3681   /**
3682    * Take a snapshot and wait for the server to complete that snapshot (blocking).
3683    * <p>
3684    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
3685    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
3686    * time for a single cluster).
3687    * <p>
3688    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3689    * snapshot with the same name (even a different type or with different parameters) will fail with
3690    * a {@link SnapshotCreationException} indicating the duplicate naming.
3691    * <p>
3692    * Snapshot names follow the same naming constraints as tables in HBase. See
3693    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3694    * <p>
3695    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
3696    * unless you are sure about the type of snapshot that you want to take.
3697    * @param snapshot snapshot to take
3698    * @throws IOException if we lose contact with the master
3699    * @throws SnapshotCreationException if snapshot failed to be taken
3700    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3701    */
3702   @Override
3703   public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
3704       IllegalArgumentException {
3705     // actually take the snapshot
3706     SnapshotResponse response = takeSnapshotAsync(snapshot);
3707     waitForSnapshot(snapshot, response.getExpectedTimeout(), getConnection());
3708   }
3709 
3710   public void waitForSnapshot(SnapshotDescription snapshot, long max,
3711       HConnection conn) throws IOException {
3712     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
3713         .build();
3714     IsSnapshotDoneResponse done = null;
3715     long start = EnvironmentEdgeManager.currentTime();
3716     long maxPauseTime = max / numRetries;
3717     int tries = 0;
3718     LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
3719         ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
3720         maxPauseTime + " ms per retry)");
3721     while (tries == 0
3722         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
3723       try {
3724         // sleep a backoff <= pauseTime amount
3725         long sleep = getPauseTime(tries++, pause);
3726         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3727         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3728           "ms while waiting for snapshot completion.");
3729         Thread.sleep(sleep);
3730       } catch (InterruptedException e) {
3731         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3732       }
3733       LOG.debug("Getting current status of snapshot from master...");
3734       done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(conn) {
3735         @Override
3736         public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3737           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3738           controller.setCallTimeout(callTimeout);
3739           return master.isSnapshotDone(controller, request);
3740         }
3741       });
3742     }
3743     if (!done.getDone()) {
3744       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
3745           + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
3746     }
3747   }
3748 
3749   /**
3750    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
3751    * <p>
3752    * Only a single snapshot should be taken at a time, or results may be undefined.
3753    * @param snapshot snapshot to take
3754    * @return response from the server indicating the max time to wait for the snapshot
3755    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
3756    * @throws SnapshotCreationException if snapshot creation failed
3757    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3758    */
3759   @Override
3760   public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
3761       SnapshotCreationException {
3762     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3763     final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
3764         .build();
3765     // run the snapshot on the master
3766     return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
3767       @Override
3768       public SnapshotResponse call(int callTimeout) throws ServiceException {
3769         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3770         controller.setCallTimeout(callTimeout);
3771         return master.snapshot(controller, request);
3772       }
3773     });
3774   }
3775 
3776   /**
3777    * Check the current state of the passed snapshot.
3778    * <p>
3779    * There are three possible states:
3780    * <ol>
3781    * <li>running - returns <tt>false</tt></li>
3782    * <li>finished - returns <tt>true</tt></li>
3783    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
3784    * </ol>
3785    * <p>
3786    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
3787    * run/started since the snapshot you are checking, you will receive an
3788    * {@link UnknownSnapshotException}.
3789    * @param snapshot description of the snapshot to check
3790    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
3791    *         running
3792    * @throws IOException if we have a network issue
3793    * @throws HBaseSnapshotException if the snapshot failed
3794    * @throws UnknownSnapshotException if the requested snapshot is unknown
3795    */
3796   @Override
3797   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
3798       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
3799 
3800     return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3801       @Override
3802       public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3803         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3804         controller.setCallTimeout(callTimeout);
3805         return master.isSnapshotDone(controller,
3806           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
3807       }
3808     }).getDone();
3809   }
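
  // Editorial usage sketch (not part of the original class): the asynchronous path: submit the
  // snapshot with takeSnapshotAsync, then poll isSnapshotFinished until it reports completion or
  // throws. SnapshotDescription is the same protobuf builder used elsewhere in this file; the
  // snapshot and table names are placeholders.
  //
  //   SnapshotDescription desc = SnapshotDescription.newBuilder()
  //       .setName("t1-snap").setTable("t1")
  //       .setType(SnapshotDescription.Type.FLUSH).build();
  //   admin.takeSnapshotAsync(desc);
  //   while (!admin.isSnapshotFinished(desc)) {
  //     Thread.sleep(500);                                       // caller handles InterruptedException
  //   }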
3810 
3811   /**
3812    * Restore the specified snapshot on the original table. (The table must be disabled)
3813    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3814    * is set to true, a snapshot of the current table is taken
3815    * before executing the restore operation.
3816    * In case of restore failure, the failsafe snapshot will be restored.
3817    * If the restore completes without problem the failsafe snapshot is deleted.
3818    *
3819    * @param snapshotName name of the snapshot to restore
3820    * @throws IOException if a remote or network exception occurs
3821    * @throws RestoreSnapshotException if snapshot failed to be restored
3822    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3823    */
3824   @Override
3825   public void restoreSnapshot(final byte[] snapshotName)
3826       throws IOException, RestoreSnapshotException {
3827     restoreSnapshot(Bytes.toString(snapshotName));
3828   }
3829 
3830   /**
3831    * Restore the specified snapshot on the original table. (The table must be disabled)
3832    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3833    * is set to true, a snapshot of the current table is taken
3834    * before executing the restore operation.
3835    * In case of restore failure, the failsafe snapshot will be restored.
3836    * If the restore completes without problem the failsafe snapshot is deleted.
3837    *
3838    * @param snapshotName name of the snapshot to restore
3839    * @throws IOException if a remote or network exception occurs
3840    * @throws RestoreSnapshotException if snapshot failed to be restored
3841    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3842    */
3843   @Override
3844   public void restoreSnapshot(final String snapshotName)
3845       throws IOException, RestoreSnapshotException {
3846     boolean takeFailSafeSnapshot =
3847       conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
3848     restoreSnapshot(snapshotName, takeFailSafeSnapshot);
3849   }
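
       // Illustrative sketch (editorial addition): turn on the failsafe snapshot through the
       // configuration property referenced above, then restore. Table and snapshot names are
       // placeholders.
       //
       //   Configuration conf = HBaseConfiguration.create();
       //   conf.setBoolean("hbase.snapshot.restore.take.failsafe.snapshot", true);
       //   try (Connection conn = ConnectionFactory.createConnection(conf);
       //        Admin admin = conn.getAdmin()) {
       //     admin.disableTable(TableName.valueOf("mytable"));
       //     admin.restoreSnapshot("mytable-snap");
       //     admin.enableTable(TableName.valueOf("mytable"));
       //   }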
3850 
3851   /**
3852    * Restore the specified snapshot on the original table. (The table must be disabled)
3853    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3854    * before executing the restore operation.
3855    * In case of restore failure, the failsafe snapshot will be restored.
3856    * If the restore completes without problem the failsafe snapshot is deleted.
3857    *
3858    * The failsafe snapshot name is configurable by using the property
3859    * "hbase.snapshot.restore.failsafe.name".
3860    *
3861    * @param snapshotName name of the snapshot to restore
3862    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3863    * @throws IOException if a remote or network exception occurs
3864    * @throws RestoreSnapshotException if snapshot failed to be restored
3865    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3866    */
3867   @Override
3868   public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
3869       throws IOException, RestoreSnapshotException {
3870     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
3871   }
3872 
3873   /**
3874    * Restore the specified snapshot on the original table. (The table must be disabled) If
3875    * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
3876    * executing the restore operation. In case of restore failure, the failsafe snapshot will be
3877    * restored. If the restore completes without problem the failsafe snapshot is deleted. The
3878    * failsafe snapshot name is configurable by using the property
3879    * "hbase.snapshot.restore.failsafe.name".
3880    * @param snapshotName name of the snapshot to restore
3881    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3882    * @param restoreAcl true to restore the ACLs of the snapshot into the table
3883    * @throws IOException if a remote or network exception occurs
3884    * @throws RestoreSnapshotException if snapshot failed to be restored
3885    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3886    */
3887   @Override
3888   public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
3889       throws IOException, RestoreSnapshotException {
3890     TableName tableName = null;
3891     for (SnapshotDescription snapshotInfo: listSnapshots()) {
3892       if (snapshotInfo.getName().equals(snapshotName)) {
3893         tableName = TableName.valueOf(snapshotInfo.getTable());
3894         break;
3895       }
3896     }
3897 
3898     if (tableName == null) {
3899       throw new RestoreSnapshotException(
3900         "Unable to find the table name for snapshot=" + snapshotName);
3901     }
3902 
3903     // The table does not exist, switch to clone.
3904     if (!tableExists(tableName)) {
3905       cloneSnapshot(snapshotName, tableName, restoreAcl);
3906       return;
3907     }
3908 
3909     // Check if the table is disabled
3910     if (!isTableDisabled(tableName)) {
3911       throw new TableNotDisabledException(tableName);
3912     }
3913 
3914     // Take a snapshot of the current state
3915     String failSafeSnapshotSnapshotName = null;
3916     if (takeFailSafeSnapshot) {
3917       failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
3918         "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
3919       failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
3920         .replace("{snapshot.name}", snapshotName)
3921         .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
3922         .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
3923       LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3924       snapshot(failSafeSnapshotSnapshotName, tableName);
3925     }
3926 
3927     try {
3928       // Restore snapshot
3929       internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
3930     } catch (IOException e) {
3931       // Something went wrong during the restore...
3932       // If the pre-restore snapshot is available, try to roll back.
3933       if (takeFailSafeSnapshot) {
3934         try {
3935           internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl);
3936           String msg = "Restore snapshot=" + snapshotName +
3937             " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
3938           LOG.error(msg, e);
3939           throw new RestoreSnapshotException(msg, e);
3940         } catch (IOException ex) {
3941           String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
3942           LOG.error(msg, ex);
3943           throw new RestoreSnapshotException(msg, e);
3944         }
3945       } else {
3946         throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
3947       }
3948     }
3949 
3950     // If the restore succeeded, delete the pre-restore snapshot
3951     if (takeFailSafeSnapshot) {
3952       try {
3953         LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3954         deleteSnapshot(failSafeSnapshotSnapshotName);
3955       } catch (IOException e) {
3956         LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
3957       }
3958     }
3959   }
3960 
3961   @Override
3962   public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
3963       throws IOException, RestoreSnapshotException {
3964     restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
3965   }
3966 
3967   /**
3968    * Create a new table by cloning the snapshot content.
3969    *
3970    * @param snapshotName name of the snapshot to be cloned
3971    * @param tableName name of the table where the snapshot will be restored
3972    * @throws IOException if a remote or network exception occurs
3973    * @throws TableExistsException if table to be created already exists
3974    * @throws RestoreSnapshotException if snapshot failed to be cloned
3975    * @throws IllegalArgumentException if the specified table does not have a valid name
3976    */
3977   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
3978       throws IOException, TableExistsException, RestoreSnapshotException {
3979     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
3980   }
3981 
3982   /**
3983    * Create a new table by cloning the snapshot content.
3984    *
3985    * @param snapshotName name of the snapshot to be cloned
3986    * @param tableName name of the table where the snapshot will be restored
3987    * @throws IOException if a remote or network exception occurs
3988    * @throws TableExistsException if table to be created already exists
3989    * @throws RestoreSnapshotException if snapshot failed to be cloned
3990    * @throws IllegalArgumentException if the specified table does not have a valid name
3991    */
3992   @Override
3993   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
3994       throws IOException, TableExistsException, RestoreSnapshotException {
3995     cloneSnapshot(Bytes.toString(snapshotName), tableName);
3996   }
3997 
3998 
4000   /**
4001    * Create a new table by cloning the snapshot content.
4002    *
4003    * @param snapshotName name of the snapshot to be cloned
4004    * @param tableName name of the table where the snapshot will be restored
4005    * @throws IOException if a remote or network exception occurs
4006    * @throws TableExistsException if table to be created already exists
4007    * @throws RestoreSnapshotException if snapshot failed to be cloned
4008    * @throws IllegalArgumentException if the specified table does not have a valid name
4009    */
4010   public void cloneSnapshot(final String snapshotName, final String tableName)
4011       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
4012     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
4013   }
4014 
4015   /**
4016    * Create a new table by cloning the snapshot content.
4017    *
4018    * @param snapshotName name of the snapshot to be cloned
4019    * @param tableName name of the table where the snapshot will be restored
        * @param restoreAcl true to restore the ACLs of the snapshot into the new table
4020    * @throws IOException if a remote or network exception occurs
4021    * @throws TableExistsException if table to be created already exists
4022    * @throws RestoreSnapshotException if snapshot failed to be cloned
4023    * @throws IllegalArgumentException if the specified table does not have a valid name
4024    */
4025   @Override
4026   public void cloneSnapshot(final String snapshotName, final TableName tableName,
4027       final boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException {
4028     if (tableExists(tableName)) {
4029       throw new TableExistsException(tableName);
4030     }
4031     internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
4032     waitUntilTableIsEnabled(tableName);
4033   }
4034 
4035   @Override
4036   public void cloneSnapshot(String snapshotName, TableName tableName)
4037       throws IOException, TableExistsException, RestoreSnapshotException {
4038     cloneSnapshot(snapshotName, tableName, false);
4039   }
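
       // Illustrative sketch (editorial addition): clone a snapshot into a table that does not
       // exist yet, assuming an Admin handle "admin". The clone is enabled automatically once the
       // restore procedure completes, so it can be used right away.
       //
       //   admin.cloneSnapshot("mytable-snap", TableName.valueOf("mytable_copy"));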
4040 
4041   /**
4042    * Execute a distributed procedure on a cluster synchronously with return data
4043    *
4044    * @param signature A distributed procedure is uniquely identified
4045    * by its signature (default the root ZK node name of the procedure).
4046    * @param instance The instance name of the procedure. For some procedures, this parameter is
4047    * optional.
4048    * @param props Property/Value pairs of properties passed to the procedure
4049    * @return data returned after procedure execution. null if no return data.
4050    * @throws IOException
4051    */
4052   @Override
4053   public byte[] execProcedureWithRet(String signature, String instance,
4054       Map<String, String> props) throws IOException {
4055     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
4056     builder.setSignature(signature).setInstance(instance);
4057     for (Entry<String, String> entry : props.entrySet()) {
4058       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
4059           .setValue(entry.getValue()).build();
4060       builder.addConfiguration(pair);
4061     }
4062 
4063     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
4064         .setProcedure(builder.build()).build();
4065     // run the procedure on the master
4066     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
4067         getConnection()) {
4068       @Override
4069       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
4070         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4071         controller.setCallTimeout(callTimeout);
4072         return master.execProcedureWithRet(controller, request);
4073       }
4074     });
4075 
4076     return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
4077   }
4078   /**
4079    * Execute a distributed procedure on a cluster.
4080    *
4081    * @param signature A distributed procedure is uniquely identified
4082    * by its signature (default the root ZK node name of the procedure).
4083    * @param instance The instance name of the procedure. For some procedures, this parameter is
4084    * optional.
4085    * @param props Property/Value pairs of properties passed to the procedure
4086    * @throws IOException
4087    */
4088   @Override
4089   public void execProcedure(String signature, String instance,
4090       Map<String, String> props) throws IOException {
4091     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
4092     builder.setSignature(signature).setInstance(instance);
4093     for (Entry<String, String> entry : props.entrySet()) {
4094       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
4095           .setValue(entry.getValue()).build();
4096       builder.addConfiguration(pair);
4097     }
4098 
4099     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
4100         .setProcedure(builder.build()).build();
4101     // run the procedure on the master
4102     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
4103         getConnection()) {
4104       @Override
4105       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
4106         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4107         controller.setCallTimeout(callTimeout);
4108         return master.execProcedure(controller, request);
4109       }
4110     });
4111 
4112     long start = EnvironmentEdgeManager.currentTime();
4113     long max = response.getExpectedTimeout();
4114     long maxPauseTime = max / this.numRetries;
4115     int tries = 0;
4116     LOG.debug("Waiting a max of " + max + " ms for procedure '" +
4117         signature + " : " + instance + "' to complete. (max " + maxPauseTime + " ms per retry)");
4118     boolean done = false;
4119     while (tries == 0
4120         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
4121       try {
4122         // sleep a backoff <= pauseTime amount
4123         long sleep = getPauseTime(tries++, pause);
4124         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
4125         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
4126           "ms while waiting for procedure completion.");
4127         Thread.sleep(sleep);
4128       } catch (InterruptedException e) {
4129         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
4130       }
4131       LOG.debug("Getting current status of procedure from master...");
4132       done = isProcedureFinished(signature, instance, props);
4133     }
4134     if (!done) {
4135       throw new IOException("Procedure '" + signature + " : " + instance
4136           + "' wasn't completed in expectedTime:" + max + " ms");
4137     }
4138   }
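
       // Illustrative sketch (editorial addition): run a globally-registered procedure and let
       // execProcedure() block until it finishes. The "flush-table-proc" signature is assumed to
       // be the table-flush procedure manager shipped with HBase; the instance is the table name.
       //
       //   Map<String, String> props = new HashMap<String, String>();
       //   admin.execProcedure("flush-table-proc", "mytable", props);
       //   // the state can also be queried separately:
       //   boolean done = admin.isProcedureFinished("flush-table-proc", "mytable", props);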
4139 
4140   /**
4141    * Check the current state of the specified procedure.
4142    * <p>
4143    * There are three possible states:
4144    * <ol>
4145    * <li>running - returns <tt>false</tt></li>
4146    * <li>finished - returns <tt>true</tt></li>
4147    * <li>finished with error - throws the exception that caused the procedure to fail</li>
4148    * </ol>
4149    * <p>
4150    *
4151    * @param signature The signature that uniquely identifies a procedure
4152    * @param instance The instance name of the procedure
4153    * @param props Property/Value pairs of properties passed to the procedure
4154    * @return true if the specified procedure is finished successfully, false if it is still running
4155    * @throws IOException if the specified procedure finished with error
4156    */
4157   @Override
4158   public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
4159       throws IOException {
4160     final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
4161     builder.setSignature(signature).setInstance(instance);
4162     for (Entry<String, String> entry : props.entrySet()) {
4163       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
4164           .setValue(entry.getValue()).build();
4165       builder.addConfiguration(pair);
4166     }
4167     final ProcedureDescription desc = builder.build();
4168     return executeCallable(
4169         new MasterCallable<IsProcedureDoneResponse>(getConnection()) {
4170           @Override
4171           public IsProcedureDoneResponse call(int callTimeout) throws ServiceException {
4172             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4173             controller.setCallTimeout(callTimeout);
4174             return master.isProcedureDone(controller, IsProcedureDoneRequest
4175                 .newBuilder().setProcedure(desc).build());
4176           }
4177         }).getDone();
4178   }
4179 
4180   /**
4181    * Execute Restore/Clone snapshot and wait for the server to complete (blocking). To check if the
4182    * cloned table exists, use {@link #isTableAvailable} -- it is not safe to create an HTable
4183    * instance for this table before it is available.
4184    * @param snapshotName snapshot to restore
4185    * @param tableName table name to restore the snapshot on
4186    * @throws IOException if a remote or network exception occurs
4187    * @throws RestoreSnapshotException if snapshot failed to be restored
4188    * @throws IllegalArgumentException if the restore request is formatted incorrectly
4189    */
4190   private void internalRestoreSnapshot(final String snapshotName, final TableName tableName,
4191       final boolean restoreAcl)
4192       throws IOException, RestoreSnapshotException {
4193     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
4194         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
4195 
4196     // actually restore the snapshot
4197     internalRestoreSnapshotAsync(snapshot, restoreAcl);
4198 
4199     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
4200         .setSnapshot(snapshot).build();
4201     IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
4202         .setDone(false).buildPartial();
4203     final long maxPauseTime = 5000;
4204     int tries = 0;
4205     while (!done.getDone()) {
4206       try {
4207         // sleep a backoff <= pauseTime amount
4208         long sleep = getPauseTime(tries++, pause);
4209         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
4210         LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
4211         Thread.sleep(sleep);
4212       } catch (InterruptedException e) {
4213         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
4214       }
4215       LOG.debug("Getting current status of snapshot restore from master...");
4216       done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
4217           getConnection()) {
4218         @Override
4219         public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
4220           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4221           controller.setCallTimeout(callTimeout);
4222           return master.isRestoreSnapshotDone(controller, request);
4223         }
4224       });
4225     }
4226     if (!done.getDone()) {
4227       throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
4228     }
4229   }
4230 
4231   /**
4232    * Execute Restore/Clone snapshot and wait for the server to complete (asynchronous)
4233    * <p>
4234    * Only a single snapshot should be restored at a time, or results may be undefined.
4235    * @param snapshot snapshot to restore
4236    * @return response from the server indicating the max time to wait for the snapshot
4237    * @throws IOException if a remote or network exception occurs
4238    * @throws RestoreSnapshotException if snapshot failed to be restored
4239    * @throws IllegalArgumentException if the restore request is formatted incorrectly
4240    */
4241   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot,
4242       final boolean restoreAcl) throws IOException, RestoreSnapshotException {
4243     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
4244 
4245     final RestoreSnapshotRequest request =
4246         RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setRestoreACL(restoreAcl).build();
4247 
4248     // run the snapshot restore on the master
4249     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
4250       @Override
4251       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
4252         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4253         controller.setCallTimeout(callTimeout);
4254         return master.restoreSnapshot(controller, request);
4255       }
4256     });
4257   }
4258 
4259   /**
4260    * List completed snapshots.
4261    * @return a list of snapshot descriptors for completed snapshots
4262    * @throws IOException if a network error occurs
4263    */
4264   @Override
4265   public List<SnapshotDescription> listSnapshots() throws IOException {
4266     return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
4267       @Override
4268       public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
4269         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4270         controller.setCallTimeout(callTimeout);
4271         return master.getCompletedSnapshots(controller,
4272           GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList();
4273       }
4274     });
4275   }
4276 
4277   /**
4278    * List all the completed snapshots matching the given regular expression.
4279    *
4280    * @param regex The regular expression to match against
4281    * @return a list of SnapshotDescription objects matching the given regular expression
4282    * @throws IOException if a remote or network exception occurs
4283    */
4284   @Override
4285   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
4286     return listSnapshots(Pattern.compile(regex));
4287   }
4288 
4289   /**
4290    * List all the completed snapshots matching the given pattern.
4291    *
4292    * @param pattern The compiled regular expression to match against
4293    * @return a list of SnapshotDescription objects matching the given pattern
4294    * @throws IOException if a remote or network exception occurs
4295    */
4296   @Override
4297   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
4298     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
4299     List<SnapshotDescription> snapshots = listSnapshots();
4300     for (SnapshotDescription snapshot : snapshots) {
4301       if (pattern.matcher(snapshot.getName()).matches()) {
4302         matched.add(snapshot);
4303       }
4304     }
4305     return matched;
4306   }
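
       // Illustrative sketch (editorial addition): print all completed snapshots whose names
       // match a prefix, assuming an Admin handle "admin". Matching happens client-side against
       // the list returned by listSnapshots().
       //
       //   for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("mytable-.*"))) {
       //     System.out.println(sd.getName() + " (table=" + sd.getTable() + ")");
       //   }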
4307 
4308   /**
4309    * Delete an existing snapshot.
4310    * @param snapshotName name of the snapshot
4311    * @throws IOException if a remote or network exception occurs
4312    */
4313   @Override
4314   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
4315     deleteSnapshot(Bytes.toString(snapshotName));
4316   }
4317 
4318   /**
4319    * Delete an existing snapshot.
4320    * @param snapshotName name of the snapshot
4321    * @throws IOException if a remote or network exception occurs
4322    */
4323   @Override
4324   public void deleteSnapshot(final String snapshotName) throws IOException {
4325     // make sure the snapshot is possibly valid
4326     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
4327     // do the delete
4328     executeCallable(new MasterCallable<Void>(getConnection()) {
4329       @Override
4330       public Void call(int callTimeout) throws ServiceException {
4331         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4332         controller.setCallTimeout(callTimeout);
4333         master.deleteSnapshot(controller,
4334           DeleteSnapshotRequest.newBuilder().
4335             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
4336         );
4337         return null;
4338       }
4339     });
4340   }
4341 
4342   /**
4343    * Delete existing snapshots whose names match the pattern passed.
4344    * @param regex The regular expression to match against
4345    * @throws IOException if a remote or network exception occurs
4346    */
4347   @Override
4348   public void deleteSnapshots(final String regex) throws IOException {
4349     deleteSnapshots(Pattern.compile(regex));
4350   }
4351 
4352   /**
4353    * Delete existing snapshots whose names match the pattern passed.
4354    * @param pattern pattern for names of the snapshot to match
4355    * @throws IOException if a remote or network exception occurs
4356    */
4357   @Override
4358   public void deleteSnapshots(final Pattern pattern) throws IOException {
4359     List<SnapshotDescription> snapshots = listSnapshots(pattern);
4360     for (final SnapshotDescription snapshot : snapshots) {
4361       try {
4362         internalDeleteSnapshot(snapshot);
4363       } catch (IOException ex) {
4364         LOG.info(
4365           "Failed to delete snapshot " + snapshot.getName() + " for table " + snapshot.getTable(),
4366           ex);
4367       }
4368     }
4369   }
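
       // Illustrative sketch (editorial addition): drop every snapshot taken with a given prefix.
       // As implemented above, failures on individual snapshots are logged and skipped rather
       // than aborting the whole loop.
       //
       //   admin.deleteSnapshots(Pattern.compile("mytable-backup-.*"));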
4370 
4371   private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
4372     executeCallable(new MasterCallable<Void>(getConnection()) {
4373       @Override
4374       public Void call(int callTimeout) throws ServiceException {
4375         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4376         controller.setCallTimeout(callTimeout);
4377         this.master.deleteSnapshot(controller, DeleteSnapshotRequest.newBuilder()
4378           .setSnapshot(snapshot).build());
4379         return null;
4380       }
4381     });
4382   }
4383 
4384   /**
4385    * Apply the new quota settings.
4386    * @param quota the quota settings
4387    * @throws IOException if a remote or network exception occurs
4388    */
4389   @Override
4390   public void setQuota(final QuotaSettings quota) throws IOException {
4391     executeCallable(new MasterCallable<Void>(getConnection()) {
4392       @Override
4393       public Void call(int callTimeout) throws ServiceException {
4394         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4395         controller.setCallTimeout(callTimeout);
4396         this.master.setQuota(controller, QuotaSettings.buildSetQuotaRequestProto(quota));
4397         return null;
4398       }
4399     });
4400   }
4401 
4402   /**
4403    * Return a Quota Scanner to list the quotas based on the filter.
4404    * @param filter the quota settings filter
4405    * @return the quota scanner
4406    * @throws IOException if a remote or network exception occurs
4407    */
4408   @Override
4409   public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
4410     return QuotaRetriever.open(conf, filter);
4411   }
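
       // Illustrative sketch (editorial addition): set a table throttle and then scan the stored
       // quotas, assuming an Admin handle "admin". QuotaSettingsFactory, ThrottleType and
       // QuotaFilter come from the quotas package and their exact signatures may vary by version.
       //
       //   admin.setQuota(QuotaSettingsFactory.throttleTable(
       //       TableName.valueOf("mytable"), ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS));
       //   QuotaRetriever scanner = admin.getQuotaRetriever(new QuotaFilter().setTableFilter("mytable"));
       //   try {
       //     for (QuotaSettings settings : scanner) {
       //       System.out.println(settings);
       //     }
       //   } finally {
       //     scanner.close();
       //   }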
4412 
4413   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
4414     // Delegate to the static helper, which creates the retrying caller and closes the callable.
4415     return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout);
4416   }
4421 
4422   private static <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable,
4423              RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout)
4424       throws IOException {
4425     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout);
4426     try {
4427       return caller.callWithRetries(callable, operationTimeout);
4428     } finally {
4429       callable.close();
4430     }
4431   }
4432   /**
4433    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4434    * connected to the active master.
4435    *
4436    * <p>
4437    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4438    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4439    * </p>
4440    *
4441    * <div style="background-color: #cccccc; padding: 2px">
4442    * <blockquote><pre>
4443    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
4444    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4445    * MyCallRequest request = MyCallRequest.newBuilder()
4446    *     ...
4447    *     .build();
4448    * MyCallResponse response = service.myCall(null, request);
4449    * </pre></blockquote></div>
4450    *
4451    * @return A MasterCoprocessorRpcChannel instance
4452    */
4453   @Override
4454   public CoprocessorRpcChannel coprocessorService() {
4455     return new MasterCoprocessorRpcChannel(connection);
4456   }
4457 
4458   /**
4459    * Simple {@link Abortable}, throwing RuntimeException on abort.
4460    */
4461   private static class ThrowableAbortable implements Abortable {
4462 
4463     @Override
4464     public void abort(String why, Throwable e) {
4465       throw new RuntimeException(why, e);
4466     }
4467 
4468     @Override
4469     public boolean isAborted() {
4470       return true;
4471     }
4472   }
4473 
4474   private static <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable,
4475              RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout) throws IOException {
4476     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
4477     try {
4478       return caller.callWithRetries(callable, operationTimeout);
4479     } finally {
4480       callable.close();
4481     }
4482   }
4483 
4484   /**
4485    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4486    * connected to the passed region server.
4487    *
4488    * <p>
4489    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4490    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4491    * </p>
4492    *
4493    * <div style="background-color: #cccccc; padding: 2px">
4494    * <blockquote><pre>
4495    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
4496    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4497    * MyCallRequest request = MyCallRequest.newBuilder()
4498    *     ...
4499    *     .build();
4500    * MyCallResponse response = service.myCall(null, request);
4501    * </pre></blockquote></div>
4502    *
4503    * @param sn the server name to which the endpoint call is made
4504    * @return A RegionServerCoprocessorRpcChannel instance
4505    */
4506   @Override
4507   public CoprocessorRpcChannel coprocessorService(ServerName sn) {
4508     return new RegionServerCoprocessorRpcChannel(connection, sn);
4509   }
4510 
4511   public static long getPauseTime(int tries, long pause) {
4512     int triesCount = tries;
4513     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
4514       triesCount = HConstants.RETRY_BACKOFF.length - 1;
4515     }
4516     return pause * HConstants.RETRY_BACKOFF[triesCount];
4517   }
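
       // Worked example (editorial addition): with a base pause of 100 ms and a RETRY_BACKOFF
       // table that starts with multipliers {1, 2, 3, 5, 10, ...}, the computed sleep grows as
       //   getPauseTime(0, 100) = 100 ms, getPauseTime(1, 100) = 200 ms,
       //   getPauseTime(3, 100) = 500 ms, getPauseTime(4, 100) = 1000 ms,
       // and is clamped to pause * RETRY_BACKOFF[RETRY_BACKOFF.length - 1] for larger try counts.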
4518 
4519   @Override
4520   public void updateConfiguration(ServerName server) throws IOException {
4521     try {
4522       this.connection.getAdmin(server).updateConfiguration(null,
4523         UpdateConfigurationRequest.getDefaultInstance());
4524     } catch (ServiceException e) {
4525       throw ProtobufUtil.getRemoteException(e);
4526     }
4527   }
4528 
4529   @Override
4530   public void updateConfiguration() throws IOException {
4531     for (ServerName server : this.getClusterStatus().getServers()) {
4532       updateConfiguration(server);
4533     }
4534   }
4535 
4536   @Override
4537   public int getMasterInfoPort() throws IOException {
4538     // TODO: Fix!  Reaching into internal implementation!!!!
4539     ConnectionManager.HConnectionImplementation connection =
4540         (ConnectionManager.HConnectionImplementation)this.connection;
4541     ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher();
4542     try {
4543       return MasterAddressTracker.getMasterInfoPort(zkw);
4544     } catch (KeeperException e) {
4545       throw new IOException("Failed to get master info port from MasterAddressTracker", e);
4546     }
4547   }
4548 
4549   private ServerName getMasterAddress() throws IOException {
4550     // TODO: Fix!  Reaching into internal implementation!!!!
4551     HConnectionImplementation connection =
4552             (HConnectionImplementation)this.connection;
4553     ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher();
4554     try {
4555       return MasterAddressTracker.getMasterAddress(zkw);
4556     } catch (KeeperException e) {
4557       throw new IOException("Failed to get master server name from MasterAddressTracker", e);
4558     }
4559   }
4560 
4561   @Override
4562   public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
4563     return executeCallable(new MasterCallable<Long>(getConnection()) {
4564       @Override
4565       public Long call(int callTimeout) throws ServiceException {
4566         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4567         controller.setCallTimeout(callTimeout);
4568         MajorCompactionTimestampRequest req =
4569             MajorCompactionTimestampRequest.newBuilder()
4570                 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
4571         return master.getLastMajorCompactionTimestamp(controller, req).getCompactionTimestamp();
4572       }
4573     });
4574   }
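
       // Illustrative sketch (editorial addition): check when a table was last major-compacted,
       // assuming an Admin handle "admin". A return value of 0 is taken to mean that no major
       // compaction timestamp has been recorded for the table.
       //
       //   long ts = admin.getLastMajorCompactionTimestamp(TableName.valueOf("mytable"));
       //   System.out.println(ts == 0 ? "no major compaction recorded"
       //       : "last major compaction at " + new java.util.Date(ts));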
4575 
4576   @Override
4577   public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
4578     return executeCallable(new MasterCallable<Long>(getConnection()) {
4579       @Override
4580       public Long call(int callTimeout) throws ServiceException {
4581         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4582         controller.setCallTimeout(callTimeout);
4583         MajorCompactionTimestampForRegionRequest req =
4584             MajorCompactionTimestampForRegionRequest
4585                 .newBuilder()
4586                 .setRegion(
4587                   RequestConverter
4588                       .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
4589         return master.getLastMajorCompactionTimestampForRegion(controller, req)
4590             .getCompactionTimestamp();
4591       }
4592     });
4593   }
4594 
4595   /**
4596    * {@inheritDoc}
4597    */
4598   @Override
4599   public void compact(final TableName tableName, final byte[] columnFamily, CompactType compactType)
4600     throws IOException, InterruptedException {
4601     compact(tableName, columnFamily, false, compactType);
4602   }
4603 
4604   /**
4605    * {@inheritDoc}
4606    */
4607   @Override
4608   public void compact(final TableName tableName, CompactType compactType)
4609     throws IOException, InterruptedException {
4610     compact(tableName, null, false, compactType);
4611   }
4612 
4613   /**
4614    * {@inheritDoc}
4615    */
4616   @Override
4617   public void majorCompact(final TableName tableName, final byte[] columnFamily,
4618     CompactType compactType) throws IOException, InterruptedException {
4619     compact(tableName, columnFamily, true, compactType);
4620   }
4621 
4622   /**
4623    * {@inheritDoc}
4624    */
4625   @Override
4626   public void majorCompact(final TableName tableName, CompactType compactType)
4627           throws IOException, InterruptedException {
4628       compact(tableName, null, true, compactType);
4629   }
4630 
4631   @Override
4632   public CompactionState getCompactionState(final TableName tableName)
4633       throws IOException {
4634     return getCompactionState(tableName, CompactType.NORMAL);
4635   }
4636 
4637   /**
4638    * Future that waits on a procedure result.
4639    * Returned by the async version of the Admin calls,
4640    * and used internally by the sync calls to wait on the result of the procedure.
4641    */
4642   @InterfaceAudience.Private
4643   @InterfaceStability.Evolving
4644   protected static class ProcedureFuture<V> implements Future<V> {
4645     private ExecutionException exception = null;
4646     private boolean procResultFound = false;
4647     private boolean done = false;
4648     private boolean cancelled = false;
4649     private V result = null;
4650 
4651     private final HBaseAdmin admin;
4652     private final Long procId;
4653 
4654     public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
4655       this.admin = admin;
4656       this.procId = procId;
4657     }
4658 
4659     @Override
4660     public boolean cancel(boolean mayInterruptIfRunning) {
4661       AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder()
4662           .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build();
4663       try {
4664         cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
4665         if (cancelled) {
4666           done = true;
4667         }
4668       } catch (IOException e) {
4669         // Cancel threw an exception for some reason. At this point we are not sure whether
4670         // the cancel succeeded or failed, so we assume it failed but log a warning
4671         // for debugging purposes.
4672         LOG.warn(
4673           "Cancelling the procedure with procId=" + procId + " threw an exception: " + e.getMessage(),
4674           e);
4675         cancelled = false;
4676       }
4677       return cancelled;
4678     }
4679 
4680     @Override
4681     public boolean isCancelled() {
4682       return cancelled;
4683     }
4684 
4685     protected AbortProcedureResponse abortProcedureResult(
4686         final AbortProcedureRequest request) throws IOException {
4687       return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(
4688           admin.getConnection()) {
4689         @Override
4690         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
4691           PayloadCarryingRpcController controller = admin.getRpcControllerFactory().newController();
4692           controller.setCallTimeout(callTimeout);
4693           return master.abortProcedure(controller, request);
4694         }
4695       });
4696     }
4697 
4698     @Override
4699     public V get() throws InterruptedException, ExecutionException {
4700       // TODO: should we ever spin forever?
4701       throw new UnsupportedOperationException();
4702     }
4703 
4704     @Override
4705     public V get(long timeout, TimeUnit unit)
4706         throws InterruptedException, ExecutionException, TimeoutException {
4707       if (!done) {
4708         long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
4709         try {
4710           try {
4711             // if the master supports procedures, try to wait for the result
4712             if (procId != null) {
4713               result = waitProcedureResult(procId, deadlineTs);
4714             }
4715             // if we don't have a proc result, try the compatibility wait
4716             if (!procResultFound) {
4717               result = waitOperationResult(deadlineTs);
4718             }
4719             result = postOperationResult(result, deadlineTs);
4720             done = true;
4721           } catch (IOException e) {
4722             result = postOpeartionFailure(e, deadlineTs);
4723             done = true;
4724           }
4725         } catch (IOException e) {
4726           exception = new ExecutionException(e);
4727           done = true;
4728         }
4729       }
4730       if (exception != null) {
4731         throw exception;
4732       }
4733       return result;
4734     }
4735 
4736     @Override
4737     public boolean isDone() {
4738       return done;
4739     }
4740 
4741     protected HBaseAdmin getAdmin() {
4742       return admin;
4743     }
4744 
4745     private V waitProcedureResult(long procId, long deadlineTs)
4746         throws IOException, TimeoutException, InterruptedException {
4747       GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
4748           .setProcId(procId)
4749           .build();
4750 
4751       int tries = 0;
4752       IOException serviceEx = null;
4753       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4754         GetProcedureResultResponse response = null;
4755         try {
4756           // Try to fetch the result
4757           response = getProcedureResult(request);
4758         } catch (IOException e) {
4759           serviceEx = unwrapException(e);
4760 
4761           // the master may be down
4762           LOG.warn("failed to get the procedure result procId=" + procId, serviceEx);
4763 
4764           // Not much to do, if we have a DoNotRetryIOException
4765           if (serviceEx instanceof DoNotRetryIOException) {
4766             // TODO: looks like there is no way to unwrap this exception and get the proper
4767             // UnsupportedOperationException aside from looking at the message.
4768             // anyway, if we fail here we just failover to the compatibility side
4769             // and that is always a valid solution.
4770             LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
4771             procResultFound = false;
4772             return null;
4773           }
4774         }
4775 
4776         // If the procedure is no longer running, we should have a result
4777         if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
4778           procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
4779           return convertResult(response);
4780         }
4781 
4782         try {
4783           Thread.sleep(getPauseTime(tries++, getAdmin().pause));
4784         } catch (InterruptedException e) {
4785           throw new InterruptedException(
4786             "Interrupted while waiting for the result of proc " + procId);
4787         }
4788       }
4789       if (serviceEx != null) {
4790         throw serviceEx;
4791       } else {
4792         throw new TimeoutException("The procedure " + procId + " is still running");
4793       }
4794     }
4795 
4796     private static IOException unwrapException(IOException e) {
4797       if (e instanceof RemoteException) {
4798         return ((RemoteException)e).unwrapRemoteException();
4799       }
4800       return e;
4801     }
4802 
4803     protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
4804         throws IOException {
4805       return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
4806           admin.getConnection()) {
4807         @Override
4808         public GetProcedureResultResponse call(int callTimeout) throws ServiceException {
4809           return master.getProcedureResult(null, request);
4810         }
4811       });
4812     }
4813 
4814     /**
4815      * Convert the procedure result response to a specified type.
4816      * @param response the procedure result object to parse
4817      * @return the result data of the procedure.
4818      */
4819     protected V convertResult(final GetProcedureResultResponse response) throws IOException {
4820       if (response.hasException()) {
4821         throw ForeignExceptionUtil.toIOException(response.getException());
4822       }
4823       return null;
4824     }
4825 
4826     /**
4827      * Fallback implementation in case the procedure is not supported by the server.
4828      * It should try to wait until the operation is completed.
4829      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4830      * @return the result data of the operation
4831      */
4832     protected V waitOperationResult(final long deadlineTs)
4833         throws IOException, TimeoutException {
4834       return null;
4835     }
4836 
4837     /**
4838      * Called after the operation is completed and the result fetched.
4839      * This allows extra steps to be performed after the procedure has completed,
4840      * and transformations to be applied to the result that will be returned by get().
4841      * @param result the result of the procedure
4842      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4843      * @return the result of the procedure, which may be the same as the passed one
4844      */
4845     protected V postOperationResult(final V result, final long deadlineTs)
4846         throws IOException, TimeoutException {
4847       return result;
4848     }
4849     /**
4850      * Called after the operation is terminated with a failure.
4851      * This allows extra steps to be performed after the procedure has terminated,
4852      * and transformations to be applied to the result that will be returned by get().
4853      * The default implementation rethrows the exception.
4854      * @param exception the exception got from fetching the result
4855      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4856      * @return the result of the procedure, which may be the same as the passed one
4857      */
4858     protected V postOperationFailure(final IOException exception, final long deadlineTs)
4859         throws IOException, TimeoutException {
4860       throw exception;
4861     }
4862 
4863     /**
4864      * Misspelled legacy variant of {@link #postOperationFailure(IOException, long)}. It now
4865      * delegates to the correctly spelled method so that overrides of that method (such as the
4866      * one in {@link TableFuture}) are actually invoked when get() reports a failure.
4867      * @param exception the exception got from fetching the result
4868      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4869      * @return the result of the procedure, which may be the same as the passed one
4870      */
4871     protected V postOpeartionFailure(final IOException exception, final long deadlineTs)
4872         throws IOException, TimeoutException {
4873       return postOperationFailure(exception, deadlineTs);
4874     }
4876 
4877     protected interface WaitForStateCallable {
4878       boolean checkState(int tries) throws IOException;
4879       void throwInterruptedException() throws InterruptedIOException;
4880       void throwTimeoutException(long elapsed) throws TimeoutException;
4881     }
4882 
4883     protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
4884         throws IOException, TimeoutException {
4885       int tries = 0;
4886       IOException serverEx = null;
4887       long startTime = EnvironmentEdgeManager.currentTime();
4888       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4889         serverEx = null;
4890         try {
4891           if (callable.checkState(tries)) {
4892             return;
4893           }
4894         } catch (IOException e) {
4895           serverEx = e;
4896         }
4897         long sleepTime = getPauseTime(tries++, getAdmin().pause);
4898         try {
4899           Thread.sleep(sleepTime);
4900         } catch (InterruptedException e) {
4901           LOG.warn("Interrupted while waiting for table DDL operation.  maxPauseTime= "
4902             + sleepTime + " for tries= " + (tries-1));
4903           callable.throwInterruptedException();
4904         }
4905       }
4906       if (serverEx != null) {
4907         throw unwrapException(serverEx);
4908       } else {
4909         callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
4910       }
4911     }
4912   }
4913   
4914   @InterfaceAudience.Private
4915   @InterfaceStability.Evolving
4916   protected static abstract class TableFuture<V> extends ProcedureFuture<V> {
4917     private final TableName tableName;
4918 
4919     public TableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) {
4920       super(admin, procId);
4921       this.tableName = tableName;
4922     }
4923 
4924     @Override
4925     public String toString() {
4926       return getDescription();
4927     }
4928 
4929     /**
4930      * @return the table name
4931      */
4932     protected TableName getTableName() {
4933       return tableName;
4934     }
4935 
4936     /**
4937      * @return the table descriptor
4938      */
4939     protected HTableDescriptor getTableDescriptor() throws IOException {
4940       return getAdmin().getTableDescriptorByTableName(getTableName());
4941     }
4942     /**
4943      * @return the operation type like CREATE, DELETE, DISABLE etc.
4944      */
4945     public abstract String getOperationType();
4946 
4947     /**
4948      * @return a description of the operation
4949      */
4950     protected String getDescription() {
4951       return "Operation: " + getOperationType() + ", "
4952           + "Table Name: " + tableName.getNameWithNamespaceInclAsString();
4954     }
4955 
4956     protected abstract class TableWaitForStateCallable implements WaitForStateCallable {
4957       @Override
4958       public void throwInterruptedException() throws InterruptedIOException {
4959         throw new InterruptedIOException("Interrupted while waiting for operation: "
4960             + getOperationType() + " on table: " + tableName.getNameWithNamespaceInclAsString());
4961       }
4962 
4963       @Override
4964       public void throwTimeoutException(long elapsedTime) throws TimeoutException {
4965         throw new TimeoutException("The operation: " + getOperationType() + " on table: " +
4966             tableName.getNameAsString() + " has not completed after " + elapsedTime + "ms");
4967       }
4968     }
4969 
4970     @Override
4971     protected V postOperationResult(final V result, final long deadlineTs)
4972         throws IOException, TimeoutException {
4973       LOG.info(getDescription() + " completed");
4974       return super.postOperationResult(result, deadlineTs);
4975     }
4976 
4977     @Override
4978     protected V postOperationFailure(final IOException exception, final long deadlineTs)
4979         throws IOException, TimeoutException {
4980       LOG.info(getDescription() + " failed with " + exception.getMessage());
4981       return super.postOperationFailure(exception, deadlineTs);
4982     }
4983 
4984     protected void waitForTableEnabled(final long deadlineTs)
4985         throws IOException, TimeoutException {
4986       waitForState(deadlineTs, new TableWaitForStateCallable() {
4987         @Override
4988         public boolean checkState(int tries) throws IOException {
4989           try {
4990             if (getAdmin().isTableAvailable(tableName)) {
4991               return true;
4992             }
4993           } catch (TableNotFoundException tnfe) {
4994             LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString()
4995                 + " was not enabled, sleeping. tries=" + tries);
4996           }
4997           return false;
4998         }
4999       });
5000     }
5001 
5002     protected void waitForTableDisabled(final long deadlineTs)
5003         throws IOException, TimeoutException {
5004       waitForState(deadlineTs, new TableWaitForStateCallable() {
5005         @Override
5006         public boolean checkState(int tries) throws IOException {
5007           return getAdmin().isTableDisabled(tableName);
5008         }
5009       });
5010     }
5011 
5012     protected void waitTableNotFound(final long deadlineTs)
5013         throws IOException, TimeoutException {
5014       waitForState(deadlineTs, new TableWaitForStateCallable() {
5015         @Override
5016         public boolean checkState(int tries) throws IOException {
5017           return !getAdmin().tableExists(tableName);
5018         }
5019       });
5020     }
5021 
5022     protected void waitForSchemaUpdate(final long deadlineTs)
5023         throws IOException, TimeoutException {
5024       waitForState(deadlineTs, new TableWaitForStateCallable() {
5025         @Override
5026         public boolean checkState(int tries) throws IOException {
5027           return getAdmin().getAlterStatus(tableName).getFirst() == 0;
5028         }
5029       });
5030     }
5031 
5032     protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
5033         throws IOException, TimeoutException {
5034       final HTableDescriptor desc = getTableDescriptor();
5035       final AtomicInteger actualRegCount = new AtomicInteger(0);
5036       final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
5037         @Override
5038         public boolean visit(Result rowResult) throws IOException {
5039           RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
5040           if (list == null) {
5041             LOG.warn("No serialized HRegionInfo in " + rowResult);
5042             return true;
5043           }
5044           HRegionLocation l = list.getRegionLocation();
5045           if (l == null) {
5046             return true;
5047           }
5048           if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
5049             return false;
5050           }
5051           if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
5052           HRegionLocation[] locations = list.getRegionLocations();
5053           for (HRegionLocation location : locations) {
5054             if (location == null) continue;
5055             ServerName serverName = location.getServerName();
5056             // Make sure that regions are assigned to server
5057             if (serverName != null && serverName.getHostAndPort() != null) {
5058               actualRegCount.incrementAndGet();
5059             }
5060           }
5061           return true;
5062         }
5063       };
5064 
5065       int tries = 0;
5066       int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
5067       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
5068         actualRegCount.set(0);
5069         MetaTableAccessor.scanMetaForTableRegions(getAdmin().getConnection(), visitor,
5070           desc.getTableName());
5071         if (actualRegCount.get() == numRegs) {
5072           // all the regions are online
5073           return;
5074         }
5075 
5076         try {
5077           Thread.sleep(getPauseTime(tries++, getAdmin().pause));
5078         } catch (InterruptedException e) {
5079           throw new InterruptedIOException("Interrupted when opening" + " regions; "
5080               + actualRegCount.get() + " of " + numRegs + " regions processed so far");
5081         }
5082       }
5083       throw new TimeoutException("Only " + actualRegCount.get() + " of " + numRegs
5084           + " regions are online; retries exhausted.");
5085     }
5086   }
5087 
5088   @Override
5089   public boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
5090                                           final MasterSwitchType... switchTypes)
5091     throws IOException {
5092     return executeCallable(new MasterCallable<boolean[]>(getConnection()) {
5093       @Override
5094       public boolean[] call(int callTimeout) throws ServiceException {
5095         MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled(null,
5096           RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchTypes));
5097         boolean[] result = new boolean[switchTypes.length];
5098         int i = 0;
5099         for (Boolean prevValue : response.getPrevValueList()) {
5100           result[i++] = prevValue;
5101         }
5102         return result;
5103       }
5104     });
5105   }
5106 
5107   @Override
5108   public boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException {
5109     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
5110       @Override
5111       public Boolean call(int callTimeout) throws ServiceException {
5112         return master.isSplitOrMergeEnabled(null,
5113           RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType)).getEnabled();
5114       }
5115     });
5116   }
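
       // Illustrative sketch (editorial addition): disable region splits around a bulk operation
       // and restore the previous setting afterwards. MasterSwitchType is assumed to be the enum
       // used by these two methods in this branch of the client API.
       //
       //   boolean[] prev = admin.setSplitOrMergeEnabled(false, true, MasterSwitchType.SPLIT);
       //   try {
       //     // ... perform the bulk operation ...
       //   } finally {
       //     admin.setSplitOrMergeEnabled(prev[0], true, MasterSwitchType.SPLIT);
       //   }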
5117 
5118   private HRegionInfo getMobRegionInfo(TableName tableName) {
5119     return new HRegionInfo(tableName, Bytes.toBytes(".mob"),
5120             HConstants.EMPTY_END_ROW, false, 0);
5121   }
5122 
5123   @Override
5124   public BackupAdmin getBackupAdmin() throws IOException {
5125     return new HBaseBackupAdmin(this);
5126   }
5127 
5128   private RpcControllerFactory getRpcControllerFactory() {
5129     return rpcControllerFactory;
5130   }
5131 }