HBase scan and query shell statements with every kind of filter, including combined filter conditions
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
import org.apache.hadoop.hbase.filter.DependentColumnFilter;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.RandomRowFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.SkipFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.filter.TimestampsFilter;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class HbaseUtils {

    public static Admin admin = null;
    public static Connection conn = null;

    public HbaseUtils() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "h71:2181");
        conf.set("hbase.rootdir", "hdfs://h71:9000/hbase");
        try {
            conn = ConnectionFactory.createConnection(conf);
            admin = conn.getAdmin();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    public static void main(String[] args) throws Exception {
        HbaseUtils hbase = new HbaseUtils();
        // 1, FamilyFilter: filter data by column family;
        // hbase.FamilyFilter("scores");
        // 2, QualifierFilter: filter data by column qualifier (column name);
        // hbase.QualifierFilter("scores");
        // 3, RowFilter: filter data by rowkey;
        // hbase.RowFilter("scores","zhangsan01");
        // 4, PrefixFilter: filter data by rowkey prefix;
        // hbase.PrefixFilter("scores","zhang");
        // Filter data by rowkey suffix
        // hbase.HouZui("scores");
        // 5, ColumnPrefixFilter: filter data by column-name prefix;
        // hbase.ColumnPrefixFilter("scores");
        // 6, MultipleColumnPrefixFilter: an enhanced version of ColumnPrefixFilter;
        // hbase.MultipleColumnPrefixFilter("scores");
        // 7, ColumnCountGetFilter: limit how many columns are returned per row;
        // hbase.columnCountGetFilter();
        // 8, ColumnPaginationFilter: paginate the columns of each row, returning only the columns inside the (limit, offset) window;
        // hbase.ColumnPaginationFilter("scores");
        // 9, ColumnRangeFilter: retrieve a range of columns;
        // hbase.ColumnRangeFilter("scores");
        // 10, DependentColumnFilter: return every column that shares a timestamp with the matching reference column ([family, qualifier] or [family, qualifier, value]), i.e. filter the reference column with a comparator, then filter the remaining columns by the reference column's timestamp;
        // hbase.DependentColumnFilter("scores");
        // 11, FirstKeyOnlyFilter: return only the first key-value pair of each row;
        // hbase.FirstKeyOnlyFilter("scores");
        // 12, FuzzyRowFilter: fuzzy rowkey matching;
        // hbase.FuzzyRowFilter("scores");
        // 13, InclusiveStopFilter: include the stop row in the results;
        // hbase.InclusiveStopFilter("scores");
        // 14, KeyOnlyFilter: return only the row keys;
        // hbase.KeyOnlyFilter("scores");
        // 15, PageFilter: fetch a given number of rows;
        // hbase.PageFilter("scores");
        // 16, RandomRowFilter: return a random fraction (given as a parameter) of the rows;
        // hbase.RandomRowFilter("scores");
        // 17, SingleColumnValueFilter: filter data by the value of a reference column;
        // hbase.SingleColumnValueFilter("scores");
        // 18, ValueFilter: filter data by cell value;
        // hbase.ValueFilter("scores");
        // 19, SkipFilter: if the wrapped filter would drop one column of a row, drop the whole row;
        // hbase.SkipFilter("scores");
        // 20, TimestampsFilter: filter data by timestamp;
        // hbase.TimestampsFilter("scores");
        // 21, WhileMatchFilter: stop the whole scan as soon as the wrapped filter rejects a record;
        // hbase.WhileMatchFilter("scores");
        // 22, FilterList: combine several filters (a minimal sketch follows this method).
        // hbase.FilterList("scores");
    }
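
    /**
    22, FilterList (a minimal sketch, not part of the original class): how several of the filters above can be
    combined. The two filters chosen and the MUST_PASS_ALL operator are illustrative assumptions.
    MUST_PASS_ALL behaves like AND; MUST_PASS_ONE behaves like OR.
    */
    public void FilterListSketch(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        // Keep rows whose rowkey starts with "zhang" and, within them, only columns whose name starts with "ar"
        List<Filter> filters = new ArrayList<Filter>();
        filters.add(new PrefixFilter(Bytes.toBytes("zhang")));
        filters.add(new ColumnPrefixFilter(Bytes.toBytes("ar")));
        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL, filters);
        scan.setFilter(filterList);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    The shell filter language can combine filters in the same way with AND/OR, e.g. (assumed query, in the style of
    the shell examples below):
    hbase(main):001:0> scan 'scores', {FILTER => "PrefixFilter('zhang') AND ColumnPrefixFilter('ar')"}
    */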

    /**
    1, FamilyFilter
    a, queries by family (column family) and returns everything from the matching families
    b, the first constructor argument is a CompareOp
    c, the second is a WritableByteArrayComparable; the available classes are BinaryComparator, BinaryPrefixComparator,
    BitComparator, NullComparator, RegexStringComparator and SubstringComparator,
    with BinaryComparator being the most commonly used (a sketch using SubstringComparator follows the shell example below)
    */
    public void FamilyFilter(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        Filter filter = new FamilyFilter(CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes("grc")));
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):224:0> scan 'scores', {FILTER => "FamilyFilter(<=,'binary:grc')"}
    or
    hbase(main):011:0> scan 'scores', FILTER => "FamilyFilter(<=,'binary:grc')"
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1498003655021, value=89
    lisi01       column=course:math, timestamp=1498003561726, value=89
    lisi01       column=grade:, timestamp=1498003561726, value=201
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan01   column=course:math, timestamp=1498003561726, value=99
    zhangsan01   column=grade:, timestamp=1498003593575, value=101
    zhangsan02   column=course:art, timestamp=1498003601365, value=90
    zhangsan02   column=course:math, timestamp=1498003561726, value=66
    zhangsan02   column=grade:, timestamp=1498003601365, value=102
    3 row(s) in 0.0220 seconds
    */
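
    /**
    A variation on FamilyFilter using one of the other comparators listed above (a sketch; the "cours" substring is
    an illustrative assumption). SubstringComparator matches any family whose name contains the given substring and
    must be used with CompareOp.EQUAL (or NOT_EQUAL).
    */
    public void FamilyFilterSubstring(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        // Return only columns from families whose name contains "cours", i.e. the "course" family
        Filter filter = new FamilyFilter(CompareOp.EQUAL, new SubstringComparator("cours"));
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }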

    /**
    2, QualifierFilter
    Similar to FamilyFilter, but returns everything from the matching columns (qualifiers)
    The first constructor argument is a CompareOp
    The second is a WritableByteArrayComparable
    */
    public void QualifierFilter(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        Filter filter = new QualifierFilter(CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes("grc")));
        // The argument is compared position by position: with "ms", every qualifier whose first character is less
        // than or equal to 'm' matches, and ties on the first character are decided by the second, and so on.
        // I did not understand this at first, so passing "math" or "course" as the argument left me confused.
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):221:0> scan 'scores', {FILTER => "QualifierFilter(<=,'binary:b')"}
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1498003655021, value=89
    lisi01       column=grade:, timestamp=1498003561726, value=201
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan01   column=grade:, timestamp=1498003593575, value=101
    zhangsan02   column=course:art, timestamp=1498003601365, value=90
    zhangsan02   column=grade:, timestamp=1498003601365, value=102
    3 row(s) in 0.0470 seconds
    */

    /**
    3, RowFilter
    Constructor arguments are set up like FamilyFilter's; every row that matches is returned.
    However, when querying by rowkey and the start and end rows are known, using Scan's start/stop row methods is
    more direct, and in my tests more than 50% faster (see the range-scan sketch after the shell example below).
    */
    public void RowFilter(String tableName, String reg) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        // The CompareOp matters: a different operator gives a different result set
        // RowFilter filter = new RowFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(reg))); // this works too
        // Filter filter = new RowFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(reg)));
        Filter filter = new RowFilter(CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(reg)));
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
        /**
         * Prefer setting the start and stop rows directly, as below: a filter still has to walk all the data,
         * whereas a start/stop range begins and ends exactly at the given rows, which is far more efficient.
         */
        // scan.setStartRow(Bytes.toBytes("AAAAAAAAAAAA"));
        // scan.setStopRow(Bytes.toBytes("AAAAAAAAABBB"));
    }
    /*
    hbase(main):004:0> scan 'scores', {FILTER => "RowFilter(<=,'binary:zhangsan01')"}
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1498003655021, value=89
    lisi01       column=course:math, timestamp=1498003561726, value=89
    lisi01       column=grade:, timestamp=1498003561726, value=201
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan01   column=course:math, timestamp=1498003561726, value=99
    zhangsan01   column=grade:, timestamp=1498003593575, value=101
    2 row(s) in 0.0210 seconds
    */
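
    /**
    The faster alternative mentioned in the RowFilter notes above: scan a rowkey range directly instead of filtering
    (a sketch; the start/stop keys are illustrative assumptions). setStartRow is inclusive, setStopRow is exclusive.
    */
    public void ScanByRowRange(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        // Only rows in ["zhangsan01", "zhangsan03") are read, so no per-row filtering is needed
        scan.setStartRow(Bytes.toBytes("zhangsan01"));
        scan.setStopRow(Bytes.toBytes("zhangsan03"));
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }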

    /**
    4, PrefixFilter
    Returns every row whose rowkey starts with the given prefix
    */
    public void PrefixFilter(String tableName, String reg) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        Filter filter = new PrefixFilter(Bytes.toBytes("zhang"));
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):022:0> scan 'scores', {FILTER => org.apache.hadoop.hbase.filter.PrefixFilter.new(org.apache.hadoop.hbase.util.Bytes.toBytes('li'))}
    or
    hbase(main):004:0> scan 'scores', {FILTER => "PrefixFilter('li')"}
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1489747672249, value=89
    lisi01       column=course:math, timestamp=1489747666861, value=89
    lisi01       column=grade:, timestamp=1489747677402, value=201
    1 row(s) in 0.0110 seconds
    */

    /**
    HBase ships with PrefixFilter for rowkey-prefix queries, but while trying to implement a suffix query I found
    that this area is still more or less a blank. The only practical workaround is a strategy based on regular
    expressions, as below.
    */
    public void HouZui(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        Filter filter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".*n01"));
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):020:0> scan 'scores', {FILTER => "RowFilter(=,'regexstring:.*n01')"}
    ROW          COLUMN+CELL
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan01   column=course:math, timestamp=1498003561726, value=99
    zhangsan01   column=grade:, timestamp=1498003593575, value=101
    1 row(s) in 0.0080 seconds
    */

    /**
    5, ColumnPrefixFilter
    */
    public void ColumnPrefixFilter(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        byte[] prefix = Bytes.toBytes("ar");
        Filter filter = new ColumnPrefixFilter(prefix);
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):021:0> scan 'scores', {FILTER => "ColumnPrefixFilter('ar')"}
    or
    hbase(main):022:0> scan 'scores', {FILTER => org.apache.hadoop.hbase.filter.ColumnPrefixFilter.new(org.apache.hadoop.hbase.util.Bytes.toBytes('ar'))}
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1498003655021, value=89
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan02   column=course:art, timestamp=1498003601365, value=90
    3 row(s) in 0.0140 seconds
    */

    /**
    6, MultipleColumnPrefixFilter
    a, returns every column that has one of the given prefixes
    b, all the wanted prefixes are listed in a byte[][]; a column is returned as soon as it matches any one of them
       (an enhanced version of ColumnPrefixFilter)
    */
    public void MultipleColumnPrefixFilter(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        byte[][] prefix = {Bytes.toBytes("ar"), Bytes.toBytes("ma")};
        Filter filter = new MultipleColumnPrefixFilter(prefix);
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):023:0> scan 'scores', {FILTER => "MultipleColumnPrefixFilter('ar','ma')"}
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1498003655021, value=89
    lisi01       column=course:math, timestamp=1498003561726, value=89
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan01   column=course:math, timestamp=1498003561726, value=99
    zhangsan02   column=course:art, timestamp=1498003601365, value=90
    zhangsan02   column=course:math, timestamp=1498003561726, value=66
    3 row(s) in 0.0290 seconds
    */

    /**
    7, ColumnCountGetFilter
    a, cannot be used in a Scan, only in a Get
    b, with 0 no data comes back; with n, the first n columns (in their storage order on the server) are returned
    c, call size() on the result to see how many columns came back
    */
    public void columnCountGetFilter() throws Exception {
        Table table = conn.getTable(TableName.valueOf("scores"));
        Get get = new Get(Bytes.toBytes("zhangsan01"));
        get.setFilter(new ColumnCountGetFilter(2));
        Result result = table.get(get);
        // Print the result size to see the effect
        System.out.println(result.size());
        // byte[] value1 = result.getValue("course".getBytes(), "art".getBytes());
        // byte[] value2 = result.getValue("course".getBytes(), "math".getBytes());
        // System.out.println("course:art"+"-->"+new String(value1)+" "
        //         +"course:math"+"-->"+new String(value2));
    }
    /*
    hbase(main):026:0> scan 'scores', {FILTER => "ColumnCountGetFilter(2)"}
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1498003655021, value=89
    lisi01       column=course:math, timestamp=1498003561726, value=89
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan01   column=course:math, timestamp=1498003561726, value=99
    zhangsan02   column=course:art, timestamp=1498003601365, value=90
    zhangsan02   column=course:math, timestamp=1498003561726, value=66
    3 row(s) in 0.0120 seconds
    */

    /**
    8, ColumnPaginationFilter
    a, limit is the number of columns to return
    b, offset is the column offset: with 0 the columns are taken from the start, with 1 from the second column onwards
    */
    public void ColumnPaginationFilter(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        Filter filter = new ColumnPaginationFilter(2, 1);
        scan.setFilter(filter);
        // After adding a family with addFamily, only data from that column family is returned
        scan.addFamily(Bytes.toBytes("course"));
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):031:0> scan 'scores',{FILTER=>org.apache.hadoop.hbase.filter.ColumnPaginationFilter.new(2,1)}
    or
    hbase(main):030:0> scan 'scores',{FILTER=> "ColumnPaginationFilter(2,1)"}
    ROW          COLUMN+CELL
    lisi01       column=course:math, timestamp=1498003561726, value=89
    lisi01       column=grade:, timestamp=1498003561726, value=201
    zhangsan01   column=course:math, timestamp=1498003561726, value=99
    zhangsan01   column=grade:, timestamp=1498003593575, value=101
    zhangsan02   column=course:math, timestamp=1498003561726, value=66
    zhangsan02   column=grade:, timestamp=1498003601365, value=102
    3 row(s) in 0.0100 seconds
    */

    /**
    9, ColumnRangeFilter
    Constructor:
    ColumnRangeFilter(byte[] minColumn, boolean minColumnInclusive, byte[] maxColumn, boolean maxColumnInclusive)
    * Used to retrieve a range of columns. For example, if a row has a million columns but you only want to look at
    * the ones whose names fall between bbbb and dddd, this filter scans that slice of column names efficiently
    * (efficient because column names are already stored in lexicographic order). Introduced in HBase 0.92.
    * A column name can appear in more than one column family; the filter returns the matching columns from every family.
    */
    public void ColumnRangeFilter(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        Filter filter = new ColumnRangeFilter(Bytes.toBytes("a"), true, Bytes.toBytes("n"), true);
        scan.setFilter(filter);
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            for (Cell cell : r.rawCells()) {
                System.out.println(
                        "Rowkey-->" + Bytes.toString(r.getRow()) + " " +
                        "Family:Qualifier-->" + Bytes.toString(CellUtil.cloneQualifier(cell)) + " " +
                        "Value-->" + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
    }
    /*
    hbase(main):032:0> scan 'scores',{FILTER=> "ColumnRangeFilter('a',true,'n',true)"}
    ROW          COLUMN+CELL
    lisi01       column=course:art, timestamp=1498003655021, value=89
    lisi01       column=course:math, timestamp=1498003561726, value=89
    zhangsan01   column=course:art, timestamp=1498003561726, value=90
    zhangsan01   column=course:math, timestamp=1498003561726, value=99
    zhangsan02   column=course:art, timestamp=1498003601365, value=90
    zhangsan02   column=course:math, timestamp=1498003561726, value=66
    3 row(s) in 0.0140 seconds
    */

    /**
    10, DependentColumnFilter (this filter takes two arguments, family and qualifier: it looks for that column in
    every row and returns all key-value pairs of the row that share the reference column's timestamp. If a row does
    not contain the specified column, none of its key-value pairs are returned.
    The filter can take an optional boolean argument: if true, the reference column itself is not returned.
    It can also take two further optional arguments, a compare operator and a value comparator, for additional
    checks on the [family, qualifier] column: if the reference column is found, its value must also pass the value
    check, and only then is the timestamp taken into account.)
    */
    public void DependentColumnFilter(String tableName) throws Exception {
        Table table = conn.getTable(TableName.valueOf(tableName));
        Scan scan = new Scan();
        // Filter filter = new DependentColumnFilter(Bytes.toBytes("course"), Bytes.toBytes("art"), false);
        // Filter filter = new DependentColumnFilter(Bytes.toBytes("course"), Bytes.toBytes("art"), true);