Homework for 2022-07-29

1. Use I/O functions to implement the effect of "ls -l <path>"

The plan: walk the directory with opendir/readdir, call lstat on each entry, then print its type, permissions, link count, owner, group, size, timestamp, and name.

#include <stdio.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <pwd.h>
#include <grp.h>
#include <time.h>

int get_file(char *fname,DIR *dp);
int get_char_mode(mode_t mode);
int get_usrname(uid_t uid);
int get_grpname(gid_t gid);
int get_time(time_t sec);
void ls_only_file(struct stat buf,const char *fname);

int main(int argc, const char *argv[])
{
	if(argc<2){
		fputs("传参不足\n",stderr);
		return -1;
	}
	char fname[256];
	char pathname[256];
	int res;
	struct stat buf;
	DIR *dp=opendir(argv[1]);
	if(NULL==dp){
		perror("opendir");
		return -1;
	}
	while(1){
		memset(fname,0,sizeof(fname));		/* clear both buffers each iteration */
		memset(pathname,0,sizeof(pathname));
		res=get_file(fname,dp);
		strcpy(pathname,argv[1]);
		strcat(pathname,"/");	/* ensure a separator even if argv[1] lacks one */
		strcat(pathname,fname);
		
		if(-1==res){
			fputs("读取有误\n",stderr);
			closedir(dp);
			return -1;
		}else if(1==res){
			fputs("目录读取完毕\n",stdout);
			break;
		}else if(2==res){
			if(lstat(pathname,&buf)<0){	/* lstat so symlinks show as type 'l' */
				fputs("lstat failed\n",stderr);
				closedir(dp);
				return -1;
			}
			ls_only_file(buf,fname);
		}
	}
	closedir(dp);

	return 0;
}
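The strcpy/strcat pair in main relies on appending the "/" separator by hand and does not guard against overflowing pathname. A bounds-checked alternative for the same join step, as a sketch:

	/* Sketch: build "dir/entry" in one call; snprintf truncates
	 * instead of overflowing when the combined path is too long. */
	if(snprintf(pathname,sizeof(pathname),"%s/%s",argv[1],fname)
			>=(int)sizeof(pathname)){
		fputs("path too long\n",stderr);
	}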

int get_file(char *fname,DIR *dp)
{
	struct dirent *rp=NULL;

	errno=0;	/* so a NULL return can be told apart from a real error */
	rp=readdir(dp);
	if(NULL==rp){
		if(errno==0){
			return 1;
		}else{
			perror("readdir");
			return -1;
		}
	}
	if(rp->d_name[0]!='.'){	/* skip ".", ".." and hidden entries, like plain ls */
		strcpy(fname,rp->d_name);
		return 2;
	}

	return 0;
}
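get_file clears errno before calling readdir so that a NULL return can be told apart from a real error; this is the idiom recommended by the readdir(3) man page. As a standalone sketch, the canonical loop is:

	/* Sketch: distinguishing end-of-directory from a readdir error. */
	struct dirent *ent;
	errno=0;
	while((ent=readdir(dp))!=NULL){
		/* ... use ent->d_name ... */
		errno=0;	/* reset before the next call */
	}
	if(errno!=0)
		perror("readdir");	/* NULL with errno set means a real error */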

int get_char_mode(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFBLK:  printf("b");         break;
	case S_IFCHR:  printf("c");         break;
	case S_IFDIR:  printf("d");         break;
	case S_IFIFO:  printf("p");         break;
	case S_IFLNK:  printf("l");         break;
	case S_IFREG:  printf("-");         break;
	case S_IFSOCK: printf("s");         break;
	default:       printf("?");         break;	/* one char keeps the columns aligned */
	}

	/* the nine permission bits sit in positions 8..0 as rwxrwxrwx */
	for(int i=8;i>=0;i--){
		if((mode&(1<<i))!=0){
			if(i%3==2){
				printf("r");
			}else if(i%3==1){
				printf("w");
			}else{
				printf("x");
			}
		}else{
			printf("-");
		}
	}
	return 0;
}
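The loop above depends on the permission bits occupying positions 8 down to 0 in r,w,x order (the octal values 0400 down to 0001). An equivalent version using the named mode macros from <sys/stat.h>, as a sketch:

	/* Sketch: same nine characters via named macros instead of bit positions. */
	void mode_string(mode_t mode,char out[10])
	{
		const mode_t bits[9]={S_IRUSR,S_IWUSR,S_IXUSR,
		                      S_IRGRP,S_IWGRP,S_IXGRP,
		                      S_IROTH,S_IWOTH,S_IXOTH};
		const char chars[]="rwxrwxrwx";
		for(int i=0;i<9;i++)
			out[i]=(mode&bits[i])?chars[i]:'-';
		out[9]='\0';
	}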

int get_usrname(uid_t uid)
{
	struct passwd *pd=getpwuid(uid);
	if(NULL==pd){
		return -1;
	}
	printf(" %s",pd->pw_name);
	return 0;
}

int get_grpname(gid_t gid)
{
	struct group *gp=getgrgid(gid);
	if(NULL==gp){
		return -1;
	}
	printf(" %s",gp->gr_name);
	return 0;
}

int get_time(time_t sec)
{
	struct tm *info=localtime(&sec);
	if(NULL==info){
		return -1;
	}
	printf(" %d月  %d %02d:%02d",info->tm_mon+1,info->tm_mday,\
			info->tm_hour,info->tm_min);
	return 0;
}
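get_time prints the month and day numerically; the real ls uses the locale's abbreviated month name. strftime can produce that layout. A sketch of the same function body ("%b %e %H:%M" yields e.g. "Jul 29 10:30"; note %e is POSIX, not plain ISO C):

	/* Sketch: ls-style timestamp via strftime. */
	char when[32];
	struct tm *info=localtime(&sec);
	if(info!=NULL && strftime(when,sizeof(when),"%b %e %H:%M",info)>0)
		printf(" %s",when);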

void ls_only_file(struct stat buf,const char *fname)
{
	get_char_mode(buf.st_mode);		/* file type + permission bits */
	printf(" %ld",(long)buf.st_nlink);	/* hard-link count */
	get_usrname(buf.st_uid);		/* owner name */
	get_grpname(buf.st_gid);		/* group name */
	printf(" %5ld",(long)buf.st_size);	/* size in bytes */
	get_time(buf.st_mtime);			/* ls -l shows modification time, not st_ctime */
	printf(" %s\n",fname);
}
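To build and try it, a minimal invocation is gcc -Wall -o my_ls my_ls.c followed by ./my_ls /tmp (the file names are just examples). Each printed line should then resemble "-rw-r--r-- 1 user group  1234 07-29 10:30 a.txt", matching the columns produced by ls_only_file.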

Test:

(screenshot of the sample output omitted)
