0: jdbc:phoenix:localhost> select count(*) from bigdata;
+------------+
| COUNT(1) |
+------------+
java.lang.RuntimeException: org.apache.phoenix.exception.PhoenixIOException: org.apache.phoenix.exception.PhoenixIOException: Failed after attempts=35, exceptions:
Mon Jul 13 14:14:03 CST 2015, org.apache.hadoop.hbase.client.RpcRetryingCaller@238f5df3, java.io.IOException: java.io.IOException: Could not reseek StoreFileScanner[HFileScanner for reader reader=hdfs://dmp-nn-1:8020/hbase/data/default/BIGDATA/07af823fa35b2d34add48e8e54ea8897/0/0b48d700f68d42ddad1939af39a993c4_SeqId_2_, compression=none, cacheConf=CacheConfig:enabled [cacheDataOnRead=true] [cacheDataOnWrite=false] [cacheIndexesOnWrite=false] [cacheBloomsOnWrite=false] [cacheEvictOnClose=false] [cacheCompressed=false][prefetchOnOpen=false], firstKey=\x01\x80\x00\x00\x00\x00\x00\x00\x0A/0:ACCEPT/1436523054347/DeleteColumn, lastKey=\x01\x80\x00\x00\x00\x05\xF5\xE0\xFE/0:_0/1436523054347/Put, avgKeyLen=32, avgValueLen=6, entries=329957958, length=6552478020, cur=\x01\x80\x00\x00\x00\x01E$\xBE/0:ACCT_INPUT_PACKET/1436523054347/Put/vlen=1/mvcc=0] to key \x01\x80\x00\x00\x00\x01E$\xBE//OLDEST_TIMESTAMP/Minimum/vlen=0/mvcc=0
at org.apache.hadoop.hbase.regionserver.StoreFileScanner.reseek(StoreFileScanner.java:184)
at org.apache.hadoop.hbase.regionserver.NonLazyKeyValueScanner.doRealSeek(NonLazyKeyValueScanner.java:55)
at org.apache.hadoop.hbase.regionserver.KeyValueHeap.generalizedSeek(KeyValueHeap.java:313)
at org.apache.hadoop.hbase.regionserver.KeyValueHeap.reseek(KeyValueHeap.java:257)
at org.apache.hadoop.hbase.regionserver.StoreScanner.reseek(StoreScanner.java:697)
at org.apache.hadoop.hbase.regionserver.StoreScanner.seekToNextRow(StoreScanner.java:672)
at org.apache.hadoop.hbase.regionserver.StoreScanner.next(StoreScanner.java:529)
at org.apache.hadoop.hbase.regionserver.KeyValueHeap.next(KeyValueHeap.java:140)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.populateResult(HRegion.java:3900)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.nextInternal(HRegion.java:3980)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.nextRaw(HRegion.java:3858)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.nextRaw(HRegion.java:3849)
at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:228)
at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$52.call(RegionCoprocessorHost.java:1203)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1517)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1592)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperationWithResult(RegionCoprocessorHost.java:1556)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1198)
at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3173)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29587)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2031)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:108)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:744)
Caused by: java.io.IOException: Failed to read compressed block at 1409556126, onDiskSizeWithoutHeader=27178, preReadHeaderSize=33, header.length=33, header bytes: IDXLEAF2\x00\x02\x00:\x00\x02\x00\x16\x00\x00\x00\x00O\xA4\xF3\xD4\x01\x00\x00@\x00\x00\x02\x007
at org.apache.hadoop.hbase.io.hfile.HFileBlock$FSReaderV2.readBlockDataInternal(HFileBlock.java:1449)
at org.apache.hadoop.hbase.io.hfile.HFileBlock$FSReaderV2.readBlockData(HFileBlock.java:1312)
at org.apache.hadoop.hbase.io.hfile.HFileReaderV2.readBlock(HFileReaderV2.java:387)
at org.apache.hadoop.hbase.io.hfile.HFileBlockIndex$BlockIndexReader.loadDataBlockWithScanInfo(HFileBlockIndex.java:253)
at org.apache.hadoop.hbase.io.hfile.HFileReaderV2$AbstractScannerV2.seekTo(HFileReaderV2.java:524)
at org.apache.hadoop.hbase.io.hfile.HFileReaderV2$AbstractScannerV2.reseekTo(HFileReaderV2.java:572)
at org.apache.hadoop.hbase.regionserver.StoreFileScanner.reseekAtOrAfter(StoreFileScanner.java:257)
at org.apache.hadoop.hbase.regionserver.StoreFileScanner.reseek(StoreFileScanner.java:173)
... 25 more
Caused by: java.io.IOException: Invalid HFile block magic: \x00\x00\x00\x00\x00\x00\x00\x00
at org.apache.hadoop.hbase.io.hfile.BlockType.parse(BlockType.java:154)
at org.apache.hadoop.hbase.io.hfile.BlockType.read(BlockType.java:165)
at org.apache.hadoop.hbase.io.hfile.HFileBlock.<init>(HFileBlock.java:239)
at org.apache.hadoop.hbase.io.hfile.HFileBlock$FSReaderV2.readBlockDataInternal(HFileBlock.java:1446)
... 32 more
Mon Jul 13 14:14:16 CST 2015, org.apache.hadoop.hbase.client.RpcRetryingCaller@238f5df3, java.io.IOException: java.io.IOException: Could not reseek StoreFileScanner[HFileScanner for reader reader=hdfs://dmp-nn-1:8020/hbase/data/default/BIGDATA/07af823fa35b2d34add48e8e54ea8897/0/0b48d700f68d42ddad1939af39a993c4_SeqId_2_, compression=none, cacheConf=CacheConfig:enabled [cacheDataOnRead=true] [cacheDataOnWrite=false] [cacheIndexesOnWrite=false] [cacheBloomsOnWrite=false] [cacheEvictOnClose=false] [cacheCompressed=false][prefetchOnOpen=false], firstKey=\x01\x80\x00\x00\x00\x00\x00\x00\x0A/0:ACCEPT/1436523054347/DeleteColumn, lastKey=\x01\x80\x00\x00\x00\x05\xF5\xE0\xFE/0:_0/1436523054347/Put, avgKeyLen=32, avgValueLen=6, entries=329957958, length=6552478020, cur=\x01\x80\x00\x00\x00\x01E$\xBE/0:ACCT_INPUT_PACKET/1436523054347/Put/vlen=1/mvcc=0] to key \x01\x80\x00\x00\x00\x01E$\xBE//OLDEST_TIMESTAMP/Minimum/vlen=0/mvcc=0
at org.apache.hadoop.hbase.regionserver.StoreFileScanner.reseek(StoreFileScanner.java:184)
at org.apache.hadoop.hbase.regionserver.NonLazyKeyValueScanner.doRealSeek(NonLazyKeyValueScanner.java:55)
at org.apache.hadoop.hbase.regionserver.KeyValueHeap.generalizedSeek(KeyValueHeap.java:313)
at org.apache.hadoop.hbase.regionserver.KeyValueHeap.reseek(KeyValueHeap.java:257)
at org.apache.hadoop.hbase.regionserver.StoreScanner.reseek(StoreScanner.java:697)
at org.apache.hadoop.hbase.regionserver.StoreScanner.seekToNextRow(StoreScanner.java:672)
at org.apache.hadoop.hbase.regionserver.StoreScanner.next(StoreScanner.java:529)
at org.apache.hadoop.hbase.regionserver.KeyValueHeap.next(KeyValueHeap.java:140)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.populateResult(HRegion.java:3900)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.nextInternal(HRegion.java:3980)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.nextRaw(HRegion.java:3858)
at org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.nextRaw(HRegion.java:3849)
at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:228)
at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$52.call(RegionCoprocessorHost.java:1203)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1517)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1592)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperationWithResult(RegionCoprocessorHost.java:1556)
at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1198)
at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3173)
at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29587)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2031)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:108)
at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:744)
Caused by: java.io.IOException: Failed to read compressed block at 1409556126, onDiskSizeWithoutHeader=27178, preReadHeaderSize=33, header.length=33, header bytes: \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
at org.apache.hadoop.hbase.io.hfile.HFileBlock$FSReaderV2.readBlockDataInternal(HFileBlock.java:1449)
at org.apache.hadoop.hbase.io.hfile.HFileBlock$FSReaderV2.readBlockData(HFileBlock.java:1312)
at org.apache.hadoop.hbase.io.hfile.HFileReaderV2.readBlock(HFileReaderV2.java:387)
at org.apache.hadoop.hbase.io.hfile.HFileBlockIndex$BlockIndexReader.loadDataBlockWithScanInfo(HFileBlockIndex.java:253)
at org.apache.hadoop.hbase.io.hfile.HFileReaderV2$AbstractScannerV2.seekTo(HFileReaderV2.java:524)
at org.apache.hadoop.hbase.io.hfile.HFileReaderV2$AbstractScannerV2.reseekTo(HFileReaderV2.java:572)
at org.apache.hadoop.hbase.regionserver.StoreFileScanner.reseekAtOrAfter(StoreFileScanner.java:257)
at org.apache.hadoop.hbase.regionserver.StoreFileScanner.reseek(StoreFileScanner.java:173)
... 25 more
Caused by: java.io.IOException: Invalid HFile block magic: \x00\x00\x00\x00\x00\x00\x00\x00
at org.apache.hadoop.hbase.io.hfile.BlockType.parse(BlockType.java:154)
at org.apache.hadoop.hbase.io.hfile.BlockType.read(BlockType.java:165)
at org.apache.hadoop.hbase.io.hfile.HFileBlock.<init>(HFileBlock.java:239)
at org.apache.hadoop.hbase.io.hfile.HFileBlock$FSReaderV2.readBlockDataInternal(HFileBlock.java:1446)
... 32 more
To guarantee data consistency, HDFS generates a checksum file alongside every data file it stores and verifies those checksums on both write and read, ensuring the data is intact.
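As a quick way to see this mechanism in action, you can walk the blocks behind the affected table and then force a full checksummed read; the HDFS paths below are taken from the stack trace above and are specific to this cluster:

    # List the files, blocks and replica locations under the table's HDFS directory
    hdfs fsck /hbase/data/default/BIGDATA -files -blocks -locations

    # Stream the suspect HFile end to end; a corrupt replica surfaces as a ChecksumException
    hadoop fs -cat /hbase/data/default/BIGDATA/07af823fa35b2d34add48e8e54ea8897/0/0b48d700f68d42ddad1939af39a993c4_SeqId_2_ > /dev/null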
So there is essentially only one situation that produces this class of error: a failing disk. The checksum file is computed from the bytes actually written to disk (complete or not), so checksum and block are in one-to-one correspondence and consistent at write time; verification passes only when what is read back matches what was written. The block has three replicas, but when the local replica fails its checksum, HBase does not go and read one of the redundant replicas on another node. The remedy is therefore not merely to stop the regionserver on this node, but to shut down both the datanode and the regionserver; once the cluster detects that the node is gone, it will re-replicate the affected blocks on its own.
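A minimal sketch of that shutdown-and-heal sequence, assuming the CDH-style service scripts used elsewhere in this article (exact service names vary by distribution):

    # On the suspect node: take both roles offline
    service hbase-regionserver stop
    service hadoop-hdfs-datanode stop

    # From any other node: watch the namenode detect the dead datanode and
    # re-replicate the now under-replicated blocks
    hdfs dfsadmin -report
    hdfs fsck / | grep -i 'under-replicated'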
After stopping the datanode and regionserver on that node, I started checking its disks.
The check turned up one disk with bad blocks. I pulled that disk, restored HBase's checksum setting, and the service returned to normal.
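The article does not say which checksum setting was restored; one plausible candidate, shown here purely as an assumption, is HBase's own checksum verification switch in hbase-site.xml, which an operator might have turned off to keep the table readable during the incident:

    <!-- hbase-site.xml: re-enable checksum verification (property choice is an
         assumption; the article does not name the setting that was changed) -->
    <property>
      <name>hbase.regionserver.checksum.verify</name>
      <value>true</value>
    </property>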
A Hadoop cluster can tolerate a machine dying outright, but in practice it does not handle a single failing disk gracefully, so disk-level monitoring is clearly worth putting in place.
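As a hedged sketch of the kind of monitoring this argues for, a daily SMART health check with smartmontools would catch this failure mode early (the cron schedule, device list, and alert address are all assumptions):

    # /etc/cron.d/disk-health -- daily SMART check on each data disk
    # Device names and the alert address are examples; adjust per node.
    0 6 * * * root for d in /dev/sdb /dev/sdc /dev/sdd; do smartctl -H $d | grep -q PASSED || echo "SMART failure on $d" | mail -s "disk alert: $(hostname)" ops@example.com; done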
Record of the resolution steps:
1. Stop the HBase and ZooKeeper related services.
2. Check the disk: badblocks -s -v -o /root/badblocks.log /dev/sdc
3. Stop the node that has the bad sectors: service hadoop-yarn-nodemanager stop
4. Remove the bad node on the namenode (see the decommission sketch after this list).
5. Rebalance: bin/start-balancer.sh
6. Edit /etc/zookeeper/conf.dist/zoo.cfg and remove the bad node (if it also served as a ZooKeeper node).
7. Edit the hbase.zookeeper.quorum property in /usr/local/hbase-0.98.6-cdh5.2.0/conf/hbase-site.xml and remove the bad node (if it also served as a ZooKeeper node).
8. Edit /usr/local/hbase-0.98.6-cdh5.2.0/conf/regionservers and remove the bad node.
9. Re-initialize and start ZooKeeper, then start HBase.
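For step 4, a minimal sketch of the standard HDFS decommission flow, assuming dfs.hosts.exclude already points at an excludes file (the file path and hostname here are hypothetical):

    # On the namenode: exclude the bad node, then tell the namenode to act on it
    echo "dmp-dn-5" >> /etc/hadoop/conf/dfs.exclude   # hostname is hypothetical
    hdfs dfsadmin -refreshNodes
    # Watch decommissioning progress before running the balancer in step 5
    hdfs dfsadmin -report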
