JDK API for Compressing Byte Arrays

This article walks through the Java API for data compression: usage scenarios for the Deflater class, its level settings, and working compression and decompression code. The focus is on tuning the compression level to reduce data-transfer and cache load.
API: int java.util.zip.Deflater.deflate(byte[] b, int off, int len) (the public entry point; deflateBytes is the private native method behind it)
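
A minimal sketch of that overload in isolation, assuming `input` holds the bytes to compress (variable names are illustrative; the full drain loop is in the Code section below):

Deflater d = new Deflater();
d.setInput(input);
d.finish();                            // signal that no more input follows
byte[] out = new byte[64];
int n = d.deflate(out, 0, out.length); // returns the number of compressed bytes written
d.end();                               // release the native zlib resources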

Usage Scenarios:
1. Reduce communication load: smaller payloads cost less to transmit.
2. Reduce the receiver's cache load when the data volume is too large.
3. Choose the compression level according to the data source: text compresses well, while images and other already-compressed formats barely shrink (see the sketch below).
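
A hedged sketch of a level-selection helper based on these scenarios; the helper name and the alreadyCompressed flag are illustrative, and THRESHOLD is the constant defined in the code below:

private static int levelFor(byte[] data, boolean alreadyCompressed)
{
    if (alreadyCompressed)
    {
        return Deflater.NO_COMPRESSION; // JPEG/PNG/ZIP payloads rarely shrink further
    }
    // Small payloads are not worth the CPU cost of maximum compression.
    return data.length > THRESHOLD ? Deflater.BEST_COMPRESSION : Deflater.BEST_SPEED;
}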

Code:

import java.io.ByteArrayOutputStream;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

/**
 * Threshold (in bytes) below which compressing is usually not worth the CPU cost.
 */
public static final int THRESHOLD = 1200;

/**
 * Answer a byte array compressed in the DEFLATE format from bytes.
 *
 * @param bytes a byte array
 * @return byte[] compressed bytes
 */
public static byte[] compress(byte[] bytes)
{
    // Create the compressor with the highest level of compression
    Deflater compressor = new Deflater();
    compressor.setLevel(Deflater.BEST_COMPRESSION);

    // Give the compressor the data to compress
    compressor.setInput(bytes);
    compressor.finish();

    // Create an expandable byte array to hold the compressed data.
    // You cannot use an array that's the same size as the original because
    // there is no guarantee that the compressed data will be smaller than
    // the uncompressed data.
    ByteArrayOutputStream bos = new ByteArrayOutputStream(bytes.length);

    // Compress the data
    byte[] buf = new byte[1024];
    while (!compressor.finished())
    {
        int count = compressor.deflate(buf);
        bos.write(buf, 0, count);
    }
    compressor.end(); // release the native zlib resources

    // Get the compressed data (closing a ByteArrayOutputStream is a no-op,
    // so no try/catch around close() is needed)
    return bos.toByteArray();
}
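
For reference, the stream wrapper java.util.zip.DeflaterOutputStream produces the same result; a minimal sketch (the method name compressViaStream is illustrative, not from the original):

import java.io.IOException;
import java.util.zip.DeflaterOutputStream;

public static byte[] compressViaStream(byte[] bytes) throws IOException
{
    ByteArrayOutputStream bos = new ByteArrayOutputStream(bytes.length);
    Deflater deflater = new Deflater(Deflater.BEST_COMPRESSION);
    try (DeflaterOutputStream dos = new DeflaterOutputStream(bos, deflater))
    {
        dos.write(bytes); // close() finishes the DEFLATE stream
    }
    finally
    {
        deflater.end(); // release the native zlib resources
    }
    return bos.toByteArray();
}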

/**
 * Answer a byte array that has been decompressed from the DEFLATE format.
 *
 * @param bytes a byte array of compressed data
 * @return byte[] decompressed bytes
 */
public static byte[] uncompress(byte[] bytes)
{
    // Create the decompressor and give it the data to decompress
    Inflater decompressor = new Inflater();
    decompressor.setInput(bytes);

    // Create an expandable byte array to hold the decompressed data
    ByteArrayOutputStream bos = new ByteArrayOutputStream(bytes.length);

    // Decompress the data
    byte[] buf = new byte[1024];
    try
    {
        while (!decompressor.finished())
        {
            int count = decompressor.inflate(buf);
            if (count == 0 && decompressor.needsInput())
            {
                // Truncated input: finished() would never become true, so fail
                // fast instead of looping forever.
                throw new IllegalArgumentException("Truncated DEFLATE data");
            }
            bos.write(buf, 0, count);
        }
    }
    catch (DataFormatException e)
    {
        // Swallowing this (as the original did) risks an infinite loop on bad input.
        throw new IllegalArgumentException("Input is not valid DEFLATE data", e);
    }
    finally
    {
        decompressor.end(); // release the native zlib resources
    }

    // Get the decompressed data (closing a ByteArrayOutputStream is a no-op)
    return bos.toByteArray();
}
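
A quick round-trip check of the two methods above; a sketch, assuming both live on the same class:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public static void main(String[] args)
{
    byte[] original = "repetitive text compresses well, repetitive text compresses well"
            .getBytes(StandardCharsets.UTF_8);
    byte[] packed = compress(original);
    byte[] restored = uncompress(packed);
    System.out.println(packed.length + " / " + original.length
            + " bytes, round-trip ok: " + Arrays.equals(original, restored));
}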
9 common frames omitted 21:37:31.683 [Thread-5] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.fs.FileContext$2@15fc336f] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:343) at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:465) at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:442) at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:428) at org.apache.hadoop.mapred.LocalDistributedCacheManager.close(LocalDistributedCacheManager.java:268) at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:598) 21:37:32.626 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@77b9d0c7] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1866) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1747) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.626 [main] INFO org.apache.hadoop.mapreduce.Job - Job job_local1106899704_0001 running in uber mode : false 21:37:32.628 [main] INFO org.apache.hadoop.mapreduce.Job - map 0% reduce 0% 21:37:32.628 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$6@3b0ee03a] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:730) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1759) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.629 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@796065aa] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1736) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.629 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@28a6301f] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1737) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.630 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: 
org.apache.hadoop.mapreduce.Job$6@2c306a57] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:730) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1759) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.630 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@773e2eb5] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1736) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.631 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@d8948cd] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isSuccessful(Job.java:625) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1763) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.631 [main] INFO org.apache.hadoop.mapreduce.Job - Job job_local1106899704_0001 failed with state FAILED due to: NA 21:37:32.631 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$8@7abe27bf] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:818) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1770) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.651 [main] INFO org.apache.hadoop.mapreduce.Job - Counters: 0 21:37:32.651 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@2679311f] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isSuccessful(Job.java:625) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1710) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.653 [shutdown-hook-0] DEBUG org.apache.hadoop.fs.FileSystem - FileSystem.close() by method: org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1530)); Key: (С (auth:SIMPLE))@hdfs://192.168.88.101:8020; URI: hdfs://192.168.88.101:8020; Object Identity Hash: 2e075efe 21:37:32.653 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - stopping client from cache: Client-e9ac678cebb441d58dd3dc3f8f54b798 21:37:32.654 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - removing client from cache: 
Client-e9ac678cebb441d58dd3dc3f8f54b798 21:37:32.654 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - stopping actual client because no more references remain: Client-e9ac678cebb441d58dd3dc3f8f54b798 21:37:32.654 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - Stopping client 21:37:32.655 [IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С: closed 21:37:32.655 [IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С: stopped, remaining connections 0 21:37:32.655 [shutdown-hook-0] DEBUG org.apache.hadoop.fs.FileSystem - FileSystem.close() by method: org.apache.hadoop.fs.FilterFileSystem.close(FilterFileSystem.java:529)); Key: (С (auth:SIMPLE))@file://; URI: file:///; Object Identity Hash: 2a38dfe6 21:37:32.655 [shutdown-hook-0] DEBUG org.apache.hadoop.fs.FileSystem - FileSystem.close() by method: org.apache.hadoop.fs.RawLocalFileSystem.close(RawLocalFileSystem.java:895)); Key: null; URI: file:///; Object Identity Hash: 6f3a54c5 21:37:32.656 [shutdown-hook-0] DEBUG org.apache.hadoop.hdfs.KeyProviderCache - Invalidating all cached KeyProviders. 21:37:32.656 [Thread-1] DEBUG org.apache.hadoop.util.ShutdownHookManager - Completed shutdown in 0.004 seconds; Timeouts: 0 21:37:32.664 [Thread-1] DEBUG org.apache.hadoop.util.ShutdownHookManager - ShutdownHookManager completed shutdown. Process finished with exit code 1
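What the log boils down to: the job runs with SIMPLE authentication under the local OS account (its non-ASCII name is rendered as "С" by the console encoding), and FileOutputCommitter.setupJob tries to create the job output directory directly under the HDFS root "/". That inode is owned by hadoop:supergroup with mode drwxr-xr-x, so any other user is denied WRITE and the job fails with state FAILED before a single map task runs. Below is a minimal sketch of one client-side workaround. It assumes SIMPLE auth and the NameNode address 192.168.88.101:8020 from the log, that identifying as the "hadoop" superuser is acceptable in this test cluster, and a hypothetical output path; it is not taken from the original job code.

Code:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsPermissionWorkaround
{
	public static void main(String[] args) throws Exception
	{
		// Under SIMPLE auth, UserGroupInformation reads HADOOP_USER_NAME
		// (environment variable or system property) before falling back to
		// the OS account, so set it before the first call that touches HDFS.
		System.setProperty("HADOOP_USER_NAME", "hadoop");

		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://192.168.88.101:8020"); // NameNode from the log

		FileSystem fs = FileSystem.get(conf);

		// Hypothetical output path: writing under /user/hadoop only needs
		// WRITE on that subtree, not on the root inode "/" that setupJob
		// was denied.
		Path out = new Path("/user/hadoop/matrix-output");
		if (fs.exists(out))
		{
			// FileOutputFormat requires the output directory not to exist yet
			fs.delete(out, true);
		}
		System.out.println("mkdirs as hadoop: " + fs.mkdirs(out));
		fs.close();
	}
}

Alternatively the fix can live on the cluster side instead of the client: as the hadoop user, create a per-user home directory (hdfs dfs -mkdir -p /user/<name> followed by hdfs dfs -chown <name> /user/<name>) and point the job output there, which avoids impersonating the superuser altogether.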