/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test that a coprocessor can open a connection and write to another table, inside a hook.
 */
@Category(MediumTests.class)
public class TestOpenTableInCoprocessor {

  private static final TableName otherTable = TableName.valueOf("otherTable");
  private static final TableName primaryTable = TableName.valueOf("primary");
  private static final byte[] family = new byte[] { 'f' };

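  // One-element array so the coprocessor hook, which runs in the same JVM as the
  // mini cluster, can set a flag that the test later asserts on.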
  private static boolean[] completed = new boolean[1];
  /**
   * Custom coprocessor that just copies the write to another table.
   */
  public static class SendToOtherTableCoprocessor extends BaseRegionObserver {

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
        final WALEdit edit, final Durability durability) throws IOException {
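      // The coprocessor environment hands out a server-side Table; mirror the
      // incoming Put into the other table before it is applied to this region.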
      Table table = e.getEnvironment().getTable(otherTable);
      table.put(put);
      completed[0] = true;
      table.close();
    }
  }

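  // Same flag pattern for the variant that supplies its own thread pool.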
  private static boolean[] completedWithPool = new boolean[1];
  /**
   * Coprocessor that writes to another table through a custom thread pool.
   */
  public static class CustomThreadPoolCoprocessor extends BaseRegionObserver {

    /**
     * Get a pool that has exactly one thread. Because the queue is synchronous, a second task
     * submitted while the first is still running is rejected rather than queued.
     * @return a single-thread pool that rejects concurrent submissions
     */
    private ExecutorService getPool() {
      int maxThreads = 1;
      long keepAliveTime = 60;
      ThreadPoolExecutor pool =
          new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
              new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("hbase-table"));
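      // A SynchronousQueue has no capacity, so with maxThreads == 1 any overlapping
      // submission fails with RejectedExecutionException instead of waiting.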
      pool.allowCoreThreadTimeOut(true);
      return pool;
    }

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
        final WALEdit edit, final Durability durability) throws IOException {
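      // Ask the environment for a Table backed by our single-thread pool so the
      // batch call below runs on it.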
      Table table = e.getEnvironment().getTable(otherTable, getPool());
      try {
        // The one-element results array matches the single action in the batch.
        table.batch(Collections.singletonList(put), new Object[1]);
      } catch (InterruptedException e1) {
        throw new IOException(e1);
      }
      completedWithPool[0] = true;
      table.close();
    }
  }

  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setupCluster() throws Exception {
    UTIL.startMiniCluster();
  }

  @After
  public void cleanupTestTable() throws Exception {
    UTIL.getHBaseAdmin().disableTable(primaryTable);
    UTIL.getHBaseAdmin().deleteTable(primaryTable);

    UTIL.getHBaseAdmin().disableTable(otherTable);
    UTIL.getHBaseAdmin().deleteTable(otherTable);
  }

  @AfterClass
  public static void teardownCluster() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTable() throws Throwable {
    runCoprocessorConnectionToRemoteTable(SendToOtherTableCoprocessor.class, completed);
  }

  @Test
  public void testCoprocessorCanCreateConnectionToRemoteTableWithCustomPool() throws Throwable {
    runCoprocessorConnectionToRemoteTable(CustomThreadPoolCoprocessor.class, completedWithPool);
  }

  private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
      boolean[] completeCheck) throws Throwable {
    HTableDescriptor primary = new HTableDescriptor(primaryTable);
    primary.addFamily(new HColumnDescriptor(family));
    // add our coprocessor
    primary.addCoprocessor(clazz.getName());

    HTableDescriptor other = new HTableDescriptor(otherTable);
    other.addFamily(new HColumnDescriptor(family));

    Admin admin = UTIL.getHBaseAdmin();
    admin.createTable(primary);
    admin.createTable(other);

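    // Write a single cell to the primary table; prePut fires synchronously on the
    // region server and mirrors the write before put() returns.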
    Table table = UTIL.getConnection().getTable(primaryTable);
    Put p = new Put(new byte[] { 'a' });
    p.addColumn(family, null, new byte[] { 'a' });
    table.put(p);
    table.close();

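    // No waiting needed: the hook already ran inside the put() call above.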
    Table target = UTIL.getConnection().getTable(otherTable);
    assertTrue("Didn't complete update to target table!", completeCheck[0]);
    assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
    target.close();
  }

  /**
   * Count the number of KeyValues in the table, scanning all possible versions.
   * @param table table to scan
   * @return number of KeyValues over all rows in the table
   * @throws IOException if the scan fails
   */
  private int getKeyValueCount(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setMaxVersions(Integer.MAX_VALUE - 1);

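    // Tally every Cell across all rows; with the version limit above this counts
    // historical versions too, not just the latest cell per column.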
    ResultScanner results = table.getScanner(scan);
    int count = 0;
    for (Result res : results) {
      count += res.listCells().size();
      System.out.println(count + ") " + res);
    }
    results.close();

    return count;
  }
}