首先需要新建JAVA项目,添加的包有:有关Hadoop的hadoop-core-0.20.204.0.jar,有关Hbase的hbase-0.90.4.jar、hbase-0.90.4-tests.jar以及Hbase资源包中lib目录下的所有jar包
代码如下:
1. import java.io.IOException;
2. import java.util.ArrayList;
3. import java.util.List;
4. import org.apache.hadoop.conf.Configuration;
5. import org.apache.hadoop.hbase.HBaseConfiguration;
6. import org.apache.hadoop.hbase.HColumnDescriptor;
7. import org.apache.hadoop.hbase.HTableDescriptor;
8. import org.apache.hadoop.hbase.KeyValue;
9. import org.apache.hadoop.hbase.MasterNotRunningException;
10. import org.apache.hadoop.hbase.ZooKeeperConnectionException;
11. import org.apache.hadoop.hbase.client.Delete;
12. import org.apache.hadoop.hbase.client.Get;
13. import org.apache.hadoop.hbase.client.HBaseAdmin;
14. import org.apache.hadoop.hbase.client.HTable;
15. import org.apache.hadoop.hbase.client.HTablePool;
16. import org.apache.hadoop.hbase.client.Put;
17. import org.apache.hadoop.hbase.client.Result;
18. import org.apache.hadoop.hbase.client.ResultScanner;
19. import org.apache.hadoop.hbase.client.Scan;
20. import org.apache.hadoop.hbase.filter.Filter;
21. import org.apache.hadoop.hbase.filter.FilterList;
22. import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
23. import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
24. import org.apache.hadoop.hbase.util.Bytes;
25. public class JinTaoTest {
26. public static Configuration configuration;
27. static {
28. configuration = HBaseConfiguration.create();
29. configuration.set("hbase.zookeeper.property.clientPort", "2181");
30. configuration.set("hbase.zookeeper.quorum", "192.168.1.100");
31. configuration.set("hbase.master", "192.168.1.100:600000");
32. }
33. public static void main(String[] args) {
34. createTable("wujintao");
35. insertData("wujintao");
36. QueryAll("wujintao");
37. QueryByCondition1("wujintao");
38. QueryByCondition2("wujintao");
39. QueryByCondition3("wujintao");
40. deleteRow("wujintao","abcdef");
41. deleteByCondition("wujintao","abcdef");
42. }
43. public static void createTable(String tableName) {
44. System.out.println("start create table ......");
45. try {
46. HBaseAdmin hBaseAdmin = new HBaseAdmin(configuration);
47. if (hBaseAdmin.tableExists(tableName)) {// 如果存在要创建的表,那么先删除,再创建
48. hBaseAdmin.disableTable(tableName);
49. hBaseAdmin.deleteTable(tableName);
50. System.out.println(tableName + " is exist,detele....");
51. }
52. HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
53. tableDescriptor.addFamily(new HColumnDescriptor("column1"));
54. tableDescriptor.addFamily(new HColumnDescriptor("column2"));
55. tableDescriptor.addFamily(new HColumnDescriptor("column3"));
56. hBaseAdmin.createTable(tableDescriptor);
57. } catch (MasterNotRunningException e) {
58. e.printStackTrace();
59. } catch (ZooKeeperConnectionException e) {
60. e.printStackTrace();
61. } catch (IOException e) {
62. e.printStackTrace();
63. }
64. System.out.println("end create table ......");
65. }
66. public static void insertData(String tableName) {
67. System.out.println("start insert data ......");
68. HTablePool pool = new HTablePool(configuration, 1000);
69. HTable table = (HTable) pool.getTable(tableName);
70. Put put = new Put("112233bbbcccc".getBytes());// 一个PUT代表一行数据,再NEW一个PUT表示第二行数据,每行一个唯一的ROWKEY,此处rowkey为put构造方法中传入的值
71. put.add("column1".getBytes(), null, "aaa".getBytes());// 本行数据的第一列
72. put.add("column2".getBytes(), null, "bbb".getBytes());// 本行数据的第三列
73. put.add("column3".getBytes(), null, "ccc".getBytes());// 本行数据的第三列
74. try {
75. table.put(put);
76. } catch (IOException e) {
77. e.printStackTrace();
78. }
79. System.out.println("end insert data ......");
80. }
81. public static void dropTable(String tableName) {
82. try {
83. HBaseAdmin admin = new HBaseAdmin(configuration);
84. admin.disableTable(tableName);
85. admin.deleteTable(tableName);
86. } catch (MasterNotRunningException e) {
87. e.printStackTrace();
88. } catch (ZooKeeperConnectionException e) {
89. e.printStackTrace();
90. } catch (IOException e) {
91. e.printStackTrace();
92. }
93. }
94. public static void deleteRow(String tablename, String rowkey) {
95. try {
96. HTable table = new HTable(configuration, tablename);
97. List list = new ArrayList();
98. Delete d1 = new Delete(rowkey.getBytes());
99. list.add(d1);
100. table.delete(list);
101. System.out.println("删除行成功!");
102. } catch (IOException e) {
103. e.printStackTrace();
104. }
105. }
106. public static void deleteByCondition(String tablename, String rowkey) {
107. //目前还没有发现有效的API能够实现 根据非rowkey的条件删除 这个功能能,还有清空表全部数据的API操作
108. }
109. public static void QueryAll(String tableName) {
110. HTablePool pool = new HTablePool(configuration, 1000);
111. HTable table = (HTable) pool.getTable(tableName);
112. try {
113. ResultScanner rs = table.getScanner(new Scan());
114. for (Result r : rs) {
115. System.out.println("获得到rowkey:" + new String(r.getRow()));
116. for (KeyValue keyValue : r.raw()) {
117. System.out.println("列:" + new String(keyValue.getFamily())
118. + "====值:" + new String(keyValue.getValue()));
119. }
120. }
121. } catch (IOException e) {
122. e.printStackTrace();
123. }
124. }
125. public static void QueryByCondition1(String tableName) {
126. HTablePool pool = new HTablePool(configuration, 1000);
127. HTable table = (HTable) pool.getTable(tableName);
128. try {
129. Get scan = new Get("abcdef".getBytes());// 根据rowkey查询
130. Result r = table.get(scan);
131. System.out.println("获得到rowkey:" + new String(r.getRow()));
132. for (KeyValue keyValue : r.raw()) {
133. System.out.println("列:" + new String(keyValue.getFamily())
134. + "====值:" + new String(keyValue.getValue()));
135. }
136. } catch (IOException e) {
137. e.printStackTrace();
138. }
139. }
140. public static void QueryByCondition2(String tableName) {
141. try {
142. HTablePool pool = new HTablePool(configuration, 1000);
143. HTable table = (HTable) pool.getTable(tableName);
144. Filter filter = new SingleColumnValueFilter(Bytes
145. .toBytes("column1"), null, CompareOp.EQUAL, Bytes
146. .toBytes("aaa")); // 当列column1的值为aaa时进行查询
147. Scan s = new Scan();
148. s.setFilter(filter);
149. ResultScanner rs = table.getScanner(s);
150. for (Result r : rs) {
151. System.out.println("获得到rowkey:" + new String(r.getRow()));
152. for (KeyValue keyValue : r.raw()) {
153. System.out.println("列:" + new String(keyValue.getFamily())
154. + "====值:" + new String(keyValue.getValue()));
155. }
156. }
157. } catch (Exception e) {
158. e.printStackTrace();
159. }
160. }
161. public static void QueryByCondition3(String tableName) {
162. try {
163. HTablePool pool = new HTablePool(configuration, 1000);
164. HTable table = (HTable) pool.getTable(tableName);
165. List filters = new ArrayList();
166. Filter filter1 = new SingleColumnValueFilter(Bytes
167. .toBytes("column1"), null, CompareOp.EQUAL, Bytes
168. .toBytes("aaa"));
169. filters.add(filter1);
170. Filter filter2 = new SingleColumnValueFilter(Bytes
171. .toBytes("column2"), null, CompareOp.EQUAL, Bytes
172. .toBytes("bbb"));
173. filters.add(filter2);
174. Filter filter3 = new SingleColumnValueFilter(Bytes
175. .toBytes("column3"), null, CompareOp.EQUAL, Bytes
176. .toBytes("ccc"));
177. filters.add(filter3);
178. FilterList filterList1 = new FilterList(filters);
179. Scan scan = new Scan();
180. scan.setFilter(filterList1);
181. ResultScanner rs = table.getScanner(scan);
182. for (Result r : rs) {
183. System.out.println("获得到rowkey:" + new String(r.getRow()));
184. for (KeyValue keyValue : r.raw()) {
185. System.out.println("列:" + new String(keyValue.getFamily())
186. + "====值:" + new String(keyValue.getValue()));
187. }
188. }
189. rs.close();
190. } catch (Exception e) {
191. e.printStackTrace();
192. }
193. }
194. }
注意:可能大家没看到更新数据的操作,其实更新操作跟添加操作完全一致,区别只在于:添加时rowkey不存在,而更新时rowkey已经存在(在timestamp相同的情况下,新值会覆盖旧值)。另外,目前似乎还没有现成的API能实现HBase数据的分页查询,不知道有没有人知道怎么做
HBase性能优化建议:
针对前面的代码,有很多不足之处,在此我就不修改上面的代码了,只是提出建议的地方,大家自己加上
1)配置
当你调用create方法时将会加载两个配置文件:hbase-default.xml和hbase-site.xml,利用的是当前的java类路径。代码中通过configuration设置的这些配置将会覆盖hbase-default.xml和hbase-site.xml中的同名配置;如果两个配置文件都存在并且都设置了同一属性,则以代码中设置的值为准
2)关于建表
public void createTable(HTableDescriptor desc)
HTableDescriptor 代表的是表的schema, 提供的方法中比较有用的有
setMaxFileSize,指定最大的region size
setMemStoreFlushSize 指定memstore flush到HDFS上的文件大小
增加family通过 addFamily方法
public void addFamily(final HColumnDescriptor family)
HColumnDescriptor 代表的是column的schema,提供的方法比较常用的有
setTimeToLive:指定最大的TTL,单位是ms,过期数据会被自动删除。
setInMemory:指定是否放在内存中,对小表有用,可用于提高效率。默认关闭
setBloomFilter:指定是否使用BloomFilter,可提高随机查询效率。默认关闭
setCompressionType:设定数据压缩类型。默认无压缩。
setMaxVersions:指定数据最大保存的版本个数。默认为3。
注意的是,一般我们不去setInMemory为true,默认是关闭的
3)关于入库
官方建议
table.setAutoFlush(false); //数据入库之前先设置此项为false
table.flushCommits();//入库完成后,手动刷入数据(注意:HTable的方法名是flushCommits,不是setflushCommits)
注意:
在入库过程中,put.setWriteToWAL(true/false);
关于这一项如果不希望大量数据在存储过程中丢失,建议设置为true,如果仅是在测试演练阶段,为了节省入库时间建议设置为false
4)关于获取表实例
HTablePool pool = new HTablePool(configuration, Integer.MAX_VALUE);
HTable table = (HTable) pool.getTable(tableName);
建议用表连接池的方式获取表,具体池有什么作用,我想用过数据库连接池的同学都知道,我就不再重复
不建议使用new HTable(configuration,tableName);的方式获取表
5)关于查询
建议每个查询语句都放入try catch语句块,并且finally中要进行关闭ResultScanner实例以及将不使用的表重新放入到HTablePool中的操作,具体做法如下
1. public static void QueryAll(String tableName) {
2. HTablePool pool = new HTablePool(configuration, Integer.MAX_VALUE);
3. HTable table = null;
4. ResultScanner rs = null;
5. try {
6. Scan scan = new Scan();
7. table = (HTable) pool.getTable(tableName);
8. rs = table.getScanner(scan);
9. for (Result r : rs) {
10. System.out.println("获得到rowkey:" + new String(r.getRow()));
11. for (KeyValue keyValue : r.raw()) {
12. System.out.println("列:" + new String(keyValue.getFamily())
13. + "====值:" + new String(keyValue.getValue()));
14. }
15. }
16. } catch (IOException e) {
17. e.printStackTrace();
18. }finally{
19. rs.close();// 最后还得关闭
20. pool.putTable(table); //实际应用过程中,pool获取实例的方式应该抽取为单例模式的,不应在每个方法都重新获取一次(单例明白?就是抽取到专门获取pool的逻辑类中,具体逻辑为如果pool存在着直接使用,如果不存在则new)
21. }
22. }