/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.handler;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.Lock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;

/**
 * Process server shutdown.
 * The server to handle must already be in the deadservers list.  See
 * {@link ServerManager#expireServer(ServerName)}.
 */
@InterfaceAudience.Private
public class ServerShutdownHandler extends EventHandler {
  private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class);
  protected final ServerName serverName;
  protected final MasterServices services;
  protected final DeadServer deadServers;
  protected final boolean shouldSplitWal; // whether to split WAL or not
  protected final int regionAssignmentWaitTimeout;

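  /**
   * Constructs a handler for processing the shutdown of the given dead server.
   * @param server the master's server context
   * @param services master services used for assignment and log splitting
   * @param deadServers registry of dead servers; {@code serverName} should already be in it
   * @param serverName the dead server whose regions need to be reassigned
   * @param shouldSplitWal whether the dead server's WAL should be split before reassignment
   */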
  public ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName,
      final boolean shouldSplitWal) {
    this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
        shouldSplitWal);
  }

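  /**
   * Same as above, but allows subclasses (such as the meta variant of this handler)
   * to supply their own {@link EventType}.
   */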
  ServerShutdownHandler(final Server server, final MasterServices services,
      final DeadServer deadServers, final ServerName serverName, EventType type,
      final boolean shouldSplitWal) {
    super(server, type);
    this.serverName = serverName;
    this.server = server;
    this.services = services;
    this.deadServers = deadServers;
    if (!this.deadServers.isDeadServer(this.serverName)) {
      LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
    }
    this.shouldSplitWal = shouldSplitWal;
    this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
      HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
  }
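
  // A minimal usage sketch (hypothetical wiring, loosely modeled on
  // ServerManager#expireServer): the master records the server as dead first and then
  // queues this handler on the executor service so that process() runs asynchronously.
  //
  //   deadServers.add(serverName);
  //   services.getExecutorService().submit(
  //       new ServerShutdownHandler(server, services, deadServers, serverName, true));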

  @Override
  public String getInformativeName() {
    if (serverName != null) {
      return this.getClass().getSimpleName() + " for " + serverName;
    } else {
      return super.getInformativeName();
    }
  }

  /**
   * @return True if the server we are processing was carrying <code>hbase:meta</code>
   */
  boolean isCarryingMeta() {
    return false;
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + "-" + serverName + "-" + getSeqid();
  }

  @Override
  public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

      // We don't want a worker thread in the MetaServerShutdownHandler
      // executor pool to block by waiting on the availability of hbase:meta.
      // Otherwise, it could run into the following issue:
      // 1. The current MetaServerShutdownHandler instance for RS1 waits for hbase:meta
      //    to come online.
      // 2. The newly assigned hbase:meta region server RS2 is shut down right after
      //    it opens the hbase:meta region. So the MetaServerShutdownHandler
      //    instance for RS1 will still be blocked.
      // 3. A new instance of MetaServerShutdownHandler for RS2 is queued.
      // 4. The newly assigned hbase:meta region server RS3 is shut down right after
      //    it opens the hbase:meta region. So the MetaServerShutdownHandler
      //    instances for RS1 and RS2 will still be blocked.
      // 5. A new instance of MetaServerShutdownHandler for RS3 is queued.
      // 6. Repeat until we run out of MetaServerShutdownHandler worker threads.
      // The solution here is to resubmit a ServerShutdownHandler request to process
      // user regions on that server so that the MetaServerShutdownHandler
      // executor pool is always available.
      //
      // If the AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign dead regions either. So we re-queue
      // the dead server for further processing too.
      AssignmentManager am = services.getAssignmentManager();
      ServerManager serverManager = services.getServerManager();
      if (isCarryingMeta() /* hbase:meta */ || !am.isFailoverCleanupDone()) {
        serverManager.processDeadServer(serverName, this.shouldSplitWal);
        return;
      }

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaTableAccessor operations themselves.
      // TODO: Is the reading of hbase:meta necessary when the Master has the state of
      // the cluster in its head?  It should be possible to do without reading hbase:meta
      // in all but one case. On split, the RS updates the hbase:meta
      // table and THEN informs the master of the split via zk nodes in the
      // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does fixup (see the fixupDaughters method below).
      // If we wanted to skip the hbase:meta scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
      // completed (zk is updated after edits to hbase:meta have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the hbase:meta daughters fixup below.
      Set<HRegionInfo> hris = null;
      while (!this.server.isStopped()) {
        try {
          server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
          if (BaseLoadBalancer.tablesOnMaster(server.getConfiguration())) {
            while (!this.server.isStopped() && serverManager.countOfRegionServers() < 2) {
              // Wait till at least another regionserver is up besides the active master
              // so that we don't assign all regions to the active master.
              // This is best effort, because a newly joined regionserver
              // could crash right after that.
              Thread.sleep(100);
            }
          }
          // Skip getting user regions if the server is stopped.
          if (!this.server.isStopped()) {
            if (ConfigUtil.useZKForAssignment(server.getConfiguration())) {
              hris = MetaTableAccessor.getServerUserRegions(this.server.getConnection(),
                this.serverName).keySet();
            } else {
              // Not using ZK for assignment, regionStates has everything we want
              hris = am.getRegionStates().getServerRegions(serverName);
            }
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
            serverName + ", retrying hbase:meta read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Delay setting the recovery mode from configuration until all outstanding
      // split log tasks have drained.
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
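      // In LOG_REPLAY mode (distributed log replay), regions are first marked for recovery
      // and assigned, and the dead server's WAL edits are replayed into them afterwards
      // (see the LogReplayHandler submitted at the end of this method); otherwise the WAL
      // is split before assignment via MasterFileSystem#splitLog.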

      try {
        if (this.shouldSplitWal) {
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery for crashed server " + serverName +
              " before assignment; regions=" + hris);
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            LOG.info("Splitting logs for " + serverName +
              " before assignment; region count=" + (hris == null ? 0 : hris.size()));
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }
      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      int replicaCount = services.getConfiguration().getInt(HConstants.META_REPLICAS_NUM,
          HConstants.DEFAULT_META_REPLICA_NUM);
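      // The primary hbase:meta region (replica id 0) is handled by the meta-specific
      // shutdown handler (see isCarryingMeta() above); only the additional replicas,
      // ids 1 through replicaCount - 1, are checked and reassigned here.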
      for (int i = 1; i < replicaCount; i++) {
        HRegionInfo metaHri =
            RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, i);
        if (am.isCarryingMetaReplica(serverName, metaHri) ==
            AssignmentManager.ServerHostRegion.HOSTING_REGION) {
          LOG.info("Reassigning meta replica " + metaHri + " that was on " + serverName);
          toAssignRegions.add(metaHri);
        }
      }
      // Clean out anything in regions in transition.  Being conservative and
      // doing it after log splitting.  Could do some states before -- OPENING?
      // OFFLINE? -- and then others after, like CLOSING, that depend on log
      // splitting.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      toAssignRegions.addAll(regionsInTransition);

      // Iterate regions that were on this server and assign them
      if (hris != null && !hris.isEmpty()) {
        RegionStates regionStates = am.getRegionStates();
        for (HRegionInfo hri: hris) {
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, am)) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // If this region is in transition on the dead server, it must be
                // opening or pending_open, which should have been covered by AM#processServerShutdown
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // Skip regions that are in transition on another server
                  LOG.info("Skip assigning region in transition on another server " + rit);
                  continue;
                }
                try {
                  // Clean up the zk node
                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                  regionStates.updateRegionState(hri, State.OFFLINE);
                } catch (KeeperException ke) {
                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                  return;
                }
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.updateRegionState(hri, State.OFFLINE);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if ((rit.isPendingCloseOrClosing() || rit.isOffline())
                  && am.getTableStateManager().isTableState(hri.getTable(),
                  ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
                  am.getReplicasToClose().contains(hri)) {
                // If the table was partially disabled and the RS went down, we should clear the RIT
                // and remove the node for the region.
                // The rit that we use may be stale if the table was in DISABLING state,
                // but even though we did assign, we will not be clearing the znode in CLOSING state.
                // Doing this does no harm. See HBASE-5927
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }

      // Determine what type of assignment to do if the dead server already restarted.
      boolean retainAssignment =
          server.getConfiguration().getBoolean("hbase.master.retain.assignment", true) &&
          serverManager.isServerWithSameHostnamePortOnline(serverName);

      try {
        if (retainAssignment) {
          Map<HRegionInfo, ServerName> toAssignRegionsMap =
              new HashMap<HRegionInfo, ServerName>(toAssignRegions.size());
          for (HRegionInfo hri: toAssignRegions) {
            toAssignRegionsMap.put(hri, serverName);
          }
          LOG.info("Best effort in SSH to retain assignment of " + toAssignRegions.size()
            + " regions from the dead server " + serverName);
          am.assign(toAssignRegionsMap);
        } else {
          LOG.info("Using round robin in SSH to assign " + toAssignRegions.size()
            + " regions from the dead server " + serverName);
          am.assign(toAssignRegions);
        }
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during " + (retainAssignment ? "retaining" : "round-robin")
          + " assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      } catch (IOException ioe) {
        LOG.warn("Caught " + ioe + " during region assignment, will retry");
        // Only do WAL splitting if shouldSplitWal is set and we are in DLR mode
        serverManager.processDeadServer(serverName,
          this.shouldSplitWal && distributedLogReplay);
        return;
      }

      if (this.shouldSplitWal && distributedLogReplay) {
        // Wait for region assignment to complete
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // The wait here is to avoid log replay hitting the current dead server and incurring
              // an RPC timeout when replay happens before region assignment completes.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
          } catch (InterruptedException ie) {
            throw new InterruptedIOException("Caught " + ie
                + " during waitOnRegionToClearRegionsInTransition");
          }
        }
        // submit logReplay work
        this.services.getExecutorService().submit(
          new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
        hasLogReplayWork = true;
      }
    } finally {
      this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
      LOG.info("Finished processing of shutdown of " + serverName);
    }
  }

  private void resubmit(final ServerName serverName, IOException ex) throws IOException {
    // Typecast to SSH so that we make sure it is the SSH instance that
    // gets resubmitted, as opposed to MSSH or some other derived instance of SSH.
    this.services.getExecutorService().submit((ServerShutdownHandler) this);
    this.deadServers.add(serverName);
    throw new IOException("Failed log splitting for " + serverName + ", will retry", ex);
  }

  /**
   * Process a dead region from a dead RS. Checks if the region is disabled or
   * disabling or if the region has a partially completed split.
   * @param hri the region that was on the dead regionserver
   * @param assignmentManager the master's assignment manager
   * @return Returns true if the specified region should be assigned, false if not.
   * @throws IOException if the table state cannot be checked
   */
  public static boolean processDeadRegion(HRegionInfo hri,
      AssignmentManager assignmentManager)
  throws IOException {
    boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
    if (!tablePresent) {
      LOG.info("The table " + hri.getTable()
          + " was deleted.  Hence not proceeding.");
      return false;
    }
    // If table is not disabled but the region is offlined,
    boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLED);
    if (disabled) {
      LOG.info("The table " + hri.getTable()
          + " was disabled.  Hence not proceeding.");
      return false;
    }
    if (hri.isOffline() && hri.isSplit()) {
      // HBASE-7721: Split parent and daughters are inserted into hbase:meta as an atomic operation.
      // If the meta scanner saw the parent split, then it should see the daughters as assigned
      // to the dead server. We don't have to do anything.
      return false;
    }
    boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLING);
    if (disabling) {
      LOG.info("The table " + hri.getTable()
          + " is being disabled.  Hence not assigning region " + hri.getEncodedName());
      return false;
    }
    return true;
  }
}