求助!!!编译时找不到 Simple_window.h,在线等,用的是 Code::Blocks

本文记录了一次使用C++进行GUI编程时遇到的编译错误经历,主要问题是找不到必要的头文件,如Simple_window.h和GUI.h。作者通过逐步添加缺失的头文件到项目的include路径中来解决这个问题。

这是源码:

提示错误为:G:\test code\GUI\main.cpp|2|fatal error: Simple_window.h: No such file or directory|

该如何设置呢?当我把 Simple_window.h 这个头文件添加到编译器安装目录下的 include 文件夹后再运行,

又提示错误:GUI.h| No such file or directory|

每添加完一个头文件,就又提示缺少另一个头文件,这是怎么回事??

#include"std_lib_facilities.h"

#include"Simple_window.h"


#include"Graph.h"
using namespace std;


int main()
{
    using namespace Graph_lib;
    Point tl(100,100);
    Simple_window win (tl,600,400,"Canvas");
    Polygon poly;
    poly.add(Point(300,200));
    poly.add(Point(350,100));
    poly.add(Point(400,200));


    poly.set_color(Color::red);
    win.attach(poly);
    win.wait_for_button();
}
把”# -*- coding: utf-8 -*- import gradio as gr import requests import time import threading # 添加全局状态跟踪 server_status = { "last_check": 0, "is_online": False, "loading": False } def check_server_status(): """检查模型服务器状态""" try: # 尝试检查状态端点 resp = requests.get("http://127.0.0.1:5000/status", timeout=3) if resp.status_code == 200: data = resp.json() # 检查服务是否运行且模型已加载 server_status["is_online"] = data.get("model_loaded", False) and data.get("status") == "running" server_status["last_check"] = time.time() return server_status["is_online"] except Exception as e: print(f"状态检查错误: {str(e)}") server_status["is_online"] = False return server_status["is_online"] def chat_interface(user_input, history): """处理用户输入并获取模型响应""" # 每30秒检查一次服务器状态 if time.time() - server_status["last_check"] > 30: threading.Thread(target=check_server_status).start() # 显示服务器状态提示 if not server_status["is_online"]: return "[系统] 模型服务器未响应,请检查服务是否已启动", history try: server_status["loading"] = True start_time = time.time() # 构建包含历史记录的完整上下文 full_context = "\n".join([f"User: {h[0]}\nAI: {h[1]}" for h in history]) full_context += f"\nUser: {user_input}" response = requests.post( "http://127.0.0.1:5000/generate", json={ "prompt": full_context, "max_length": 1024 # 添加长度限制 }, timeout=180 # 更长超时时间 ) if response.status_code == 200: ai_response = response.json().get("response", "No response") response_time = time.time() - start_time formatted_response = f"{ai_response}\n\n⏱️ 响应时间: {response_time:.2f}秒" return formatted_response, history else: return f"[错误] 服务器返回状态码 {response.status_code}", history except requests.exceptions.Timeout: return "[超时] 模型响应时间过长,请稍后重试", history except Exception as e: return f"[错误] 发生异常: {str(e)}", history finally: server_status["loading"] = False # 创建聊天界面 with gr.Blocks(title="DeepSeek-7B Chat") as demo: gr.Markdown("# 🧠 DeepSeek-7B 对话系统") gr.Markdown("> 输入问题后按Enter提交,模型可能需要10-30秒响应") with gr.Row(): chatbot = gr.Chatbot(label="对话历史", height=500) with gr.Column(): gr.Markdown("### 使用说明") 
gr.Markdown("1. 输入问题后按Enter提交") gr.Markdown("2. 长回复可能需要30秒以上") gr.Markdown("3. 清除按钮会重置对话") server_status_box = gr.Textbox(label="服务状态", value="正在检测服务...", interactive=False) msg = gr.Textbox(label="输入消息", placeholder="输入您的问题...") with gr.Row(): submit_btn = gr.Button("发送") clear = gr.Button("清除对话") retry_btn = gr.Button("重试连接") # 更新服务器状态函数 def update_status(): is_online = check_server_status() status = "🟢 在线" if is_online else "🔴 离线" return f"{status} | 最后检查: {time.strftime('%H:%M:%S')}" # 响应处理函数 def respond(message, chat_history): bot_message, _ = chat_interface(message, chat_history) chat_history.append((message, bot_message)) return "", chat_history # 清除对话 def clear_chat(): return [] # 重试连接 def retry_connection(): is_online = check_server_status() status = "🟢 在线" if is_online else "🔴 离线" return f"{status} | 最后检查: {time.strftime('%H:%M:%S')}" # 组件交互 msg.submit(respond, [msg, chatbot], [msg, chatbot]) submit_btn.click(respond, [msg, chatbot], [msg, chatbot]) clear.click(clear_chat, outputs=[chatbot]) retry_btn.click(retry_connection, outputs=[server_status_box]) # 初始化检查 demo.load(update_status, outputs=[server_status_box]) if __name__ == "__main__": # 初始状态检查 check_server_status() # 添加连接测试 print("="*50) print("测试模型服务器连接...") try: test_resp = requests.get("http://127.0.0.1:5000/status", timeout=3) print(f"连接测试结果: 状态码 {test_resp.status_code}") if test_resp.status_code == 200: print(f"服务状态: {test_resp.json()}") except Exception as e: print(f"连接失败: {str(e)}") print("="*50) # 启动界面 demo.launch( server_port=7860, share=False, server_name="0.0.0.0" )“改成# model_server/simple_ui.py import sys import threading from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QSplitter, QTabWidget, QTextEdit, QPushButton, QComboBox, QSlider, QLabel, QGroupBox) from PyQt5.QtCore import Qt, QTimer from PyQt5.QtGui import QPixmap, QImage import requests import numpy as np import cv2 class AIStudioUI(QMainWindow): def __init__(self): super().__init__() 
self.setWindowTitle("AI 工作室") self.setGeometry(100, 100, 1200, 800) # 主布局 main_widget = QWidget() main_layout = QHBoxLayout() main_widget.setLayout(main_layout) self.setCentralWidget(main_widget) # 左侧控制面板 control_panel = self.create_control_panel() main_layout.addWidget(control_panel, 1) # 占1份宽度 # 右侧主内容区 splitter = QSplitter(Qt.Vertical) # 输入面板 input_panel = self.create_input_panel() splitter.addWidget(input_panel) # 输出面板 output_panel = self.create_output_panel() splitter.addWidget(output_panel) # 状态面板 status_panel = self.create_status_panel() splitter.addWidget(status_panel) splitter.setSizes([300, 400, 100]) # 设置各区域高度比例 main_layout.addWidget(splitter, 3) # 占3份宽度 # 定时更新状态 self.timer = QTimer() self.timer.timeout.connect(self.update_system_status) self.timer.start(2000) # 每2秒更新一次状态 # 后续定义各面板创建函数... 对吗?
08-13
// SPDX-License-Identifier: GPL-2.0
/*
 * RAM disk image loader (this appears to be init/do_mounts_rd.c from
 * the Linux kernel).  Probes a file or block device for a RAM disk
 * image, identifies the image format by magic number, and loads it
 * into /dev/ram -- either by a straight block copy or by streaming it
 * through a decompressor.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/minix_fs.h>
#include <linux/ext2_fs.h>
#include <linux/romfs_fs.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/initrd.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "do_mounts.h"
#include "../fs/squashfs/squashfs_fs.h"

#include <linux/decompress/generic.h>

/*
 * Source/destination files and their current read/write offsets.
 * Shared with the decompressor callbacks (compr_fill/compr_flush)
 * further below.
 */
static struct file *in_file, *out_file;
static loff_t in_pos, out_pos;

/* "prompt_ramdisk=" kernel parameter: deprecated, warn and ignore. */
static int __init prompt_ramdisk(char *str)
{
	pr_warn("ignoring the deprecated prompt_ramdisk= option\n");
	return 1;
}
__setup("prompt_ramdisk=", prompt_ramdisk);

int __initdata rd_image_start;		/* starting block # of image */

/* "ramdisk_start=" kernel parameter: block offset of the image. */
static int __init ramdisk_start_setup(char *str)
{
	rd_image_start = simple_strtol(str,NULL,0);
	return 1;
}
__setup("ramdisk_start=", ramdisk_start_setup);

static int __init crd_load(decompress_fn deco);

/*
 * This routine tries to find a RAM disk image to load, and returns the
 * number of blocks to read for a non-compressed image, 0 if the image
 * is a compressed image, and -1 if an image with the right magic
 * numbers could not be found.
 *
 * We currently check for the following magic numbers:
 *	minix
 *	ext2
 *	romfs
 *	cramfs
 *	squashfs
 *	gzip
 *	bzip2
 *	lzma
 *	xz
 *	lzo
 *	lz4
 */
static int __init identify_ramdisk_image(struct file *file, loff_t pos,
		decompress_fn *decompressor)
{
	const int size = 512;
	struct minix_super_block *minixsb;
	struct romfs_super_block *romfsb;
	struct cramfs_super *cramfsb;
	struct squashfs_super_block *squashfsb;
	int nblocks = -1;
	unsigned char *buf;
	const char *compress_name;
	unsigned long n;
	int start_block = rd_image_start;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* All superblock pointers alias the same 512-byte probe buffer. */
	minixsb = (struct minix_super_block *) buf;
	romfsb = (struct romfs_super_block *) buf;
	cramfsb = (struct cramfs_super *) buf;
	squashfsb = (struct squashfs_super_block *) buf;
	memset(buf, 0xe5, size);

	/*
	 * Read block 0 to test for compressed kernel
	 */
	pos = start_block * BLOCK_SIZE;
	kernel_read(file, buf, size, &pos);

	*decompressor = decompress_method(buf, size, &compress_name);
	if (compress_name) {
		printk(KERN_NOTICE "RAMDISK: %s image found at block %d\n",
		       compress_name, start_block);
		if (!*decompressor)
			printk(KERN_EMERG
			       "RAMDISK: %s decompressor not configured!\n",
			       compress_name);
		/* 0 tells the caller to take the decompression path. */
		nblocks = 0;
		goto done;
	}

	/* romfs is at block zero too */
	if (romfsb->word0 == ROMSB_WORD0 &&
	    romfsb->word1 == ROMSB_WORD1) {
		printk(KERN_NOTICE
		       "RAMDISK: romfs filesystem found at block %d\n",
		       start_block);
		nblocks = (ntohl(romfsb->size)+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
		goto done;
	}

	if (cramfsb->magic == CRAMFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: cramfs filesystem found at block %d\n",
		       start_block);
		nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
		goto done;
	}

	/* squashfs is at block zero too */
	if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: squashfs filesystem found at block %d\n",
		       start_block);
		nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1)
			  >> BLOCK_SIZE_BITS;
		goto done;
	}

	/*
	 * Read 512 bytes further to check if cramfs is padded
	 */
	pos = start_block * BLOCK_SIZE + 0x200;
	kernel_read(file, buf, size, &pos);

	if (cramfsb->magic == CRAMFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: cramfs filesystem found at block %d\n",
		       start_block);
		nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
		goto done;
	}

	/*
	 * Read block 1 to test for minix and ext2 superblock
	 */
	pos = (start_block + 1) * BLOCK_SIZE;
	kernel_read(file, buf, size, &pos);

	/* Try minix */
	if (minixsb->s_magic == MINIX_SUPER_MAGIC ||
	    minixsb->s_magic == MINIX_SUPER_MAGIC2) {
		printk(KERN_NOTICE
		       "RAMDISK: Minix filesystem found at block %d\n",
		       start_block);
		nblocks = minixsb->s_nzones << minixsb->s_log_zone_size;
		goto done;
	}

	/* Try ext2 */
	n = ext2_image_size(buf);
	if (n) {
		printk(KERN_NOTICE
		       "RAMDISK: ext2 filesystem found at block %d\n",
		       start_block);
		nblocks = n;
		goto done;
	}

	printk(KERN_NOTICE
	       "RAMDISK: Couldn't find valid RAM disk image starting at %d.\n",
	       start_block);
done:
	kfree(buf);
	return nblocks;
}

/* Size of a block device in KiB (i_size >> 10); 0 for non-block files. */
static unsigned long nr_blocks(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	if (!S_ISBLK(inode->i_mode))
		return 0;
	return i_size_read(inode) >> 10;
}

/*
 * Load a RAM disk image from @from into /dev/ram.
 * Returns 1 on success, 0 on failure.
 */
int __init rd_load_image(char *from)
{
	int res = 0;
	unsigned long rd_blocks, devblocks;
	int nblocks, i;
	char *buf = NULL;
	unsigned short rotate = 0;
	decompress_fn decompressor = NULL;
#if !defined(CONFIG_S390)
	char rotator[4] = { '|' , '/' , '-' , '\\' };
#endif

	out_file = filp_open("/dev/ram", O_RDWR, 0);
	if (IS_ERR(out_file))
		goto out;

	in_file = filp_open(from, O_RDONLY, 0);
	if (IS_ERR(in_file))
		goto noclose_input;

	in_pos = rd_image_start * BLOCK_SIZE;
	nblocks = identify_ramdisk_image(in_file, in_pos, &decompressor);
	if (nblocks < 0)
		goto done;

	if (nblocks == 0) {
		/* Compressed image: stream it through the decompressor. */
		if (crd_load(decompressor) == 0)
			goto successful_load;
		goto done;
	}

	/*
	 * NOTE NOTE: nblocks is not actually blocks but
	 * the number of kibibytes of data to load into a ramdisk.
	 */
	rd_blocks = nr_blocks(out_file);
	if (nblocks > rd_blocks) {
		printk("RAMDISK: image too big! (%dKiB/%ldKiB)\n",
		       nblocks, rd_blocks);
		goto done;
	}

	/*
	 * OK, time to copy in the data
	 */
	if (strcmp(from, "/initrd.image") == 0)
		devblocks = nblocks;
	else
		devblocks = nr_blocks(in_file);

	if (devblocks == 0) {
		printk(KERN_ERR "RAMDISK: could not determine device size\n");
		goto done;
	}

	buf = kmalloc(BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "RAMDISK: could not allocate buffer\n");
		goto done;
	}

	printk(KERN_NOTICE "RAMDISK: Loading %dKiB [%ld disk%s] into ram disk... ",
		nblocks, ((nblocks-1)/devblocks)+1, nblocks>devblocks ? "s" : "");
	for (i = 0; i < nblocks; i++) {
		if (i && (i % devblocks == 0)) {
			/* Multi-disk images are not supported here: stop
			 * after one device's worth of data. */
			pr_cont("done disk #1.\n");
			rotate = 0;
			fput(in_file);
			break;
		}
		kernel_read(in_file, buf, BLOCK_SIZE, &in_pos);
		kernel_write(out_file, buf, BLOCK_SIZE, &out_pos);
#if !defined(CONFIG_S390)
		/* Console spinner, updated every 16 KiB copied. */
		if (!(i % 16)) {
			pr_cont("%c\b", rotator[rotate & 0x3]);
			rotate++;
		}
#endif
	}
	pr_cont("done.\n");

successful_load:
	res = 1;
done:
	fput(in_file);
noclose_input:
	fput(out_file);
out:
	kfree(buf);
	init_unlink("/dev/ram");
	return res;
}

/* Create the device nodes and load a RAM disk from the root device. */
int __init rd_load_disk(int n)
{
	create_dev("/dev/root", ROOT_DEV);
	create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n));
	return rd_load_image("/dev/root");
}

static int exit_code;
static int decompress_error;

/* decompress_fn "fill" callback: refill the input buffer from in_file. */
static long __init compr_fill(void *buf, unsigned long len)
{
	long r = kernel_read(in_file, buf, len, &in_pos);
	if (r < 0)
		printk(KERN_ERR "RAMDISK: error while reading compressed data");
	else if (r == 0)
		printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
	return r;
}

/* decompress_fn "flush" callback: write decompressed bytes to out_file;
 * a short write latches decompress_error and aborts decompression. */
static long __init compr_flush(void *window, unsigned long outcnt)
{
	long written = kernel_write(out_file, window, outcnt, &out_pos);
	if (written != outcnt) {
		if (decompress_error == 0)
			printk(KERN_ERR
			       "RAMDISK: incomplete write (%ld != %ld)\n",
			       written, outcnt);
		decompress_error = 1;
		return -1;
	}
	return outcnt;
}

/* decompress_fn error callback: log the message and record the failure. */
static void __init error(char *x)
{
	printk(KERN_ERR "%s\n", x);
	exit_code = 1;
	decompress_error = 1;
}

/* Run decompressor @deco over in_file -> out_file; 0 means success. */
static int __init crd_load(decompress_fn deco)
{
	int result;

	if (!deco) {
		/* Image was recognized as compressed, but the matching
		 * decompressor is not built into this kernel. */
		pr_emerg("Invalid ramdisk decompression routine. "
			 "Select appropriate config option.\n");
		panic("Could not decompress initial ramdisk image.");
	}

	result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error);
	if (decompress_error)
		result = 1;
	return result;
}
最新发布
10-24
21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.gpg.application.cleaner.interval-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.output.fileoutputformat.compress 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.gpg.subcluster.cleaner.interval-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.sharedcache.store.in-memory.staleness-period-mins 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.write.byte-array-manager.count-limit 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.runtime.linux.runc.layer-mounts-to-keep 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.group.mapping.providers.combined 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.running.map.limit 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.webapp.address 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.placement-constraints.scheduler.pool-size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.multipart.size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.slow.io.warning.threshold.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.app.mapreduce.am.job.committer.commit-window 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.submithostname 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for 
dfs.namenode.edits.asynclogging 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.blockreport.incremental.intervalMsec 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.ifile.readahead 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.state-store.sql.conn-time-out 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.runtime.linux.runc.image-tag-to-manifest-plugin 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.socketcache.capacity 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.select.input.csv.field.delimiter 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.retry.policy.spec 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.reencrypt.batch.size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.connection.ssl.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.proxyuser.hadoop.hosts 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.read.considerLoad 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.max.slowdisks.to.exclude 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.groups.cache.secs 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.peer.stats.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.replication 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for 
dfs.storage.policy.satisfier.work.multiplier.per.iteration 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.group.mapping.ldap.directory.search.timeout 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.checksum.combine.mode 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.storage.policy.satisfier.max.outstanding.paths 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.sleep-delay-before-sigkill.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.apps.cache.enable 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.automatic.close 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.reencrypt.edek.threads 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.disk-health-checker.disk-free-space-threshold.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.acls.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.short.circuit.replica.stale.threshold.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.health-checker.run-before-startup 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.send.qop.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.jobhistory.intermediate-done-dir 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - 
Handling deprecation for dfs.namenode.slowpeer.collect.interval 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.server-defaults.validity.period.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.client.libjars.wildcard 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.storage.policy.satisfier.address 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.reduce.shuffle.input.buffer.percent 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.audit.loggers 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for io.serializations 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.dispatcher.print-thread-pool.keep-alive-time 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.http.cross-origin.allowed-methods 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.snapshot.capture.openfiles 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.qjournal.queued-edits.limit.mb 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.zk.acl 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.container.stderr.pattern 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.cluster.local.dir 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for ipc.[port_number].cost-provider.impl 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.kerberos.kinit.command 21:37:31.523 [main] DEBUG 
org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.metrics.logger.period.seconds 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.viewfs.overload.scheme.target.abfss.impl 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.block.access.token.lifetime 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.delegation.token.max-lifetime 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.drop.cache.behind.writes 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.system-metrics-publisher.timeline-server-v1.enable-batch 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.remove.dead.datanode.batchnum 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.submission-preprocessor.file-refresh-interval-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.num.extra.edits.retained 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.block.placement.ec.classname 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for ipc.client.connect.max.retries.on.timeouts 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.client.resolve.topology.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.qjournal.http.open.timeout.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for ha.health-monitor.connect-retry-interval.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.edekcacheloader.initial.delay.ms 
21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.rbf.observer.read.enable 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.failover.resolver.useFQDN 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for io.mapfile.bloom.size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.ftp.data.connection.mode 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client-write-packet-size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.app.mapreduce.shuffle.log.backups 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.kerberos.principal.pattern 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.webhdfs.socket.connect-timeout 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.scheduler.monitor.enable 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.proxyuser.hadoop.groups 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.select.output.csv.quote.character 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.task.stuck.timeout-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.authorization 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.timeline-service.version 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.am.liveness-monitor.expiry-interval-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.gpg.webapp.address 
21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.timeline-service.leveldb-timeline-store.path 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.reduce.slowstart.completedmaps 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.delegation.token.max-lifetime 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.ha.automatic-failover.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.socket.write.timeout 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.accesstime.precision 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.group.mapping.ldap.conversion.rule 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for io.mapfile.bloom.error.rate 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.webapp.rest-csrf.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.timeline-service.leveldb-state-store.path 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.scheduler.configuration.zk-store.parent-path 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for ipc.[port_number].backoff.enable 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.timeline-service.writer.flush-interval-seconds 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.posix.acl.inheritance.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.outliers.report.interval 
21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.kms.client.encrypted.key.cache.low-watermark 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.top.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.retry.throttle.interval 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.jobhistory.webapp.rest-csrf.custom-header 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.webapp.xfs-filter.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for ipc.identity-provider.impl 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.cached.conn.retry 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.submission-preprocessor.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.system.tags 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.runtime.linux.runc.image-tag-to-manifest-plugin.num-manifests-to-cache 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.least-load-policy-selector.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.numa-awareness.numactl.cmd 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.path.based.cache.refresh.interval.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.fs-limits.max-directory-items 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.ha.log-roll.period 21:37:31.523 [main] 
DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.distributed-scheduling.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.pmem.cache.recovery 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.minicluster.fixed.ports 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.storage.policy.satisfier.queue.limit 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.snapshot.filesystem.limit 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.resource.percentage-physical-cpu-limit 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.fs-limits.max-xattr-size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.blocks.per.postponedblocks.rescan 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.maintenance.replication.min 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.timeline-service.app-aggregation-interval-secs 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.max.op.size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.iostatistics.thread.level.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.reducer.unconditional-preempt.delay.sec 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.app.mapreduce.am.hard-kill-timeout-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.connection.ttl 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - 
Handling deprecation for dfs.storage.policy.permissions.superuser-only 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.df.interval 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.cache.limit.max-single-resource-mb 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.assumed.role.session.duration 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.disk.balancer.block.tolerance.percent 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.webhdfs.netty.high.watermark 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.balance.max.concurrent.moves 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.log.delete.threshold 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.token.tracking.ids.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.assumed.role.credentials.provider 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.log-container-debug-info-on-error.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.kms.client.failover.sleep.max.millis 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.webapp.rest-csrf.custom-header 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.jobhistory.move.thread-count 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for io.compression.codec.zstd.level 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for 
yarn.timeline-service.http-authentication.simple.anonymous.allowed 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.provided.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.sharedcache.client-server.thread-count 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.scheduler.configuration.max.version 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.jobhistory.jobname.limit 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.dispatcher.print-events-info.threshold 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.decommission.blocks.per.interval 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.qjournal.write-txns.timeout.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.subcluster-resolver.class 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.read-lock-reporting-threshold-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.task.timeout 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.resource.memory-mb 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.container-log-monitor.total-size-limit-bytes 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.fileoutputcommitter.algorithm.version 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.framework.name 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for 
yarn.router.clientrm.interceptor-class.pipeline 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.system-metrics-publisher.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.sharedcache.nested-level 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.dns.log-slow-lookups.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.jobhistory.webapp.https.address 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for file.client-write-packet-size 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for ipc.client.ping 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.state-store.sql.idle-time-out 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.gpg.policy.generator.interval 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.gpg.webapp.https.address 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.balancer.max-no-move-interval 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.minicluster.control-resource-monitoring 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.disk.balancer.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.fs.state-store.num-retries 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.uid.cache.secs 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.ha.automatic-failover.zk-base-path 
21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.speculative.speculative-cap-running-tasks 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.node-labels.am.allow-non-exclusive-allocation 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.du.reserved.calculator 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.block.id.layout.upgrade.threads 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for io.erasurecode.codec.native.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.client.load.resource-types.from-server 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.client.application-client-protocol.poll-timeout-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.datanode.oob.timeout-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.sharedcache.mode 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.hdfs-servers 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.epoch.range 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.federation.gpg.subcluster.heartbeat.expiration-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.map.output.compress 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.token.service.use_ip 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.kms.client.encrypted.key.cache.num.refill.threads 
21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.edekcacheloader.interval.ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.aux-services.mapreduce_shuffle.class 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.group.mapping.ldap.num.attempts.before.failover 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.du.interval 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.read.uri.cache.enabled 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.zk.retry-interval-ms 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.data.transfer.server.tcpnodelay 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.dir 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.http.client.failover.max.attempts 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.socket.send.buffer 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.client.block.write.locateFollowingBlock.retries 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.jvm.system-properties-to-log 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.enable.retrycache 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.job.encrypted-intermediate-data.buffer.kb 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidia-docker-v1.endpoint 21:37:31.523 [main] DEBUG 
org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.data.transfer.client.tcpnodelay 21:37:31.523 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.storage.policy.satisfier.mode 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.webapp.xfs-filter.xframe-options 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.reduce.memory.mb 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.caller.context.enabled 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.nodemanagers.heartbeat-interval-speedup-factor 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.qjournal.prepare-recovery.timeout.ms 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.router.deregister.subcluster.enabled 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for hadoop.security.sensitive-config-keys 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for mapreduce.client.completion.pollinterval 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.namenode.secondary.http-address 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.router.interceptor.allow-partial-result.enable 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for yarn.resourcemanager.webapp.https.address 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for fs.s3a.retry.throttle.limit 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - Handling deprecation for dfs.permissions.allow.owner.set.quota 21:37:31.524 [main] DEBUG org.apache.hadoop.conf.Configuration - 
Handling deprecation for hadoop.domainname.resolver.impl 21:37:31.597 [main] INFO org.apache.hadoop.mapreduce.Job - The url to track the job: http://localhost:8080/ 21:37:31.598 [main] INFO org.apache.hadoop.mapreduce.Job - Running job: job_local1106899704_0001 21:37:31.601 [Thread-5] INFO org.apache.hadoop.mapred.LocalJobRunner - OutputCommitter set in config null 21:37:31.603 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@7c6442c2] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1736) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:31.611 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@2098d37d] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1737) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:31.612 [Thread-5] DEBUG org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory - Looking for committer factory for path hdfs://192.168.88.101:8020/output 21:37:31.612 [Thread-5] DEBUG org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory - No scheme-specific factory defined in mapreduce.outputcommitter.factory.scheme.hdfs 21:37:31.612 [Thread-5] INFO 
org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory - No output committer factory defined, defaulting to FileOutputCommitterFactory 21:37:31.613 [Thread-5] DEBUG org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory - Creating FileOutputCommitter for path hdfs://192.168.88.101:8020/output and context TaskAttemptContextImpl{JobContextImpl{jobId=job_local1106899704_0001}; taskId=attempt_local1106899704_0001_m_000000_0, status=''} 21:37:31.613 [Thread-5] DEBUG org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter - Instantiating committer FileOutputCommitter{PathOutputCommitter{context=TaskAttemptContextImpl{JobContextImpl{jobId=job_local1106899704_0001}; taskId=attempt_local1106899704_0001_m_000000_0, status=''}; org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter@9f9d7ce}; outputPath=null, workPath=null, algorithmVersion=0, skipCleanup=false, ignoreCleanupFailures=false} with output path hdfs://192.168.88.101:8020/output and job context TaskAttemptContextImpl{JobContextImpl{jobId=job_local1106899704_0001}; taskId=attempt_local1106899704_0001_m_000000_0, status=''} 21:37:31.614 [Thread-5] INFO org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter - File Output Committer Algorithm version is 2 21:37:31.614 [Thread-5] INFO org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter - FileOutputCommitter skip cleanup _temporary folders under output directory:false, ignore cleanup failures: false 21:37:31.615 [Thread-5] INFO org.apache.hadoop.mapred.LocalJobRunner - OutputCommitter is org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter 21:37:31.622 [Thread-5] DEBUG org.apache.hadoop.fs.statistics.impl.IOStatisticsContextIntegration - Created instance IOStatisticsContextImpl{id=2, threadId=32, ioStatistics=counters=(); gauges=(); minimums=(); maximums=(); means=(); } 21:37:31.629 [Thread-5] DEBUG org.apache.hadoop.hdfs.DFSClient - /output/_temporary/0: masked={ masked: rwxr-xr-x, unmasked: rwxrwxrwx } 21:37:31.637 [IPC 
Parameter Sending Thread for xxjdxnj/192.168.88.101:8020] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from СIPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С sending #3 org.apache.hadoop.hdfs.protocol.ClientProtocol.mkdirs 21:37:31.649 [IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С got value #3 21:37:31.654 [Thread-5] DEBUG org.apache.hadoop.io.retry.RetryInvocationHandler - Exception while invoking call #3 ClientNamenodeProtocolTranslatorPB.mkdirs over null. Not retrying because try once and fail. org.apache.hadoop.ipc.RemoteException: Permission denied: user=С, access=WRITE, inode="/":hadoop:supergroup:drwxr-xr-x at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:661) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:501) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermissionWithContext(FSPermissionChecker.java:525) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:395) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1964) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1945) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1904) at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:60) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3531) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:1173) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:750) at 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) at org.apache.hadoop.ipc.Client.call(Client.java:1529) at org.apache.hadoop.ipc.Client.call(Client.java:1426) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) at jdk.proxy2/jdk.proxy2.$Proxy11.mkdirs(Unknown Source) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$mkdirs$20(ClientNamenodeProtocolTranslatorPB.java:611) at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:611) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at 
java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) at jdk.proxy2/jdk.proxy2.$Proxy12.mkdirs(Unknown Source) at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2555) at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2531) at org.apache.hadoop.hdfs.DistributedFileSystem$27.doCall(DistributedFileSystem.java:1497) at org.apache.hadoop.hdfs.DistributedFileSystem$27.doCall(DistributedFileSystem.java:1494) at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1511) at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1486) at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2494) at org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.setupJob(FileOutputCommitter.java:356) at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:541) 21:37:31.663 [IPC Parameter Sending Thread for xxjdxnj/192.168.88.101:8020] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from СIPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С sending #4 org.apache.hadoop.hdfs.protocol.ClientProtocol.delete 21:37:31.674 [IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С got value #4 21:37:31.675 [Thread-5] 
DEBUG org.apache.hadoop.ipc.ProtobufRpcEngine2 - Call: delete took 12ms 21:37:31.678 [Thread-5] WARN org.apache.hadoop.mapred.LocalJobRunner - job_local1106899704_0001 org.apache.hadoop.security.AccessControlException: Permission denied: user=С, access=WRITE, inode="/":hadoop:supergroup:drwxr-xr-x at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:661) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:501) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermissionWithContext(FSPermissionChecker.java:525) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:395) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1964) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1945) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1904) at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:60) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3531) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:1173) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:750) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at 
org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203) at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2557) at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2531) at org.apache.hadoop.hdfs.DistributedFileSystem$27.doCall(DistributedFileSystem.java:1497) at org.apache.hadoop.hdfs.DistributedFileSystem$27.doCall(DistributedFileSystem.java:1494) at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1511) at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1486) at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2494) at org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.setupJob(FileOutputCommitter.java:356) at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:541) Caused by: org.apache.hadoop.ipc.RemoteException: Permission denied: user=С, access=WRITE, 
inode="/":hadoop:supergroup:drwxr-xr-x at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:661) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:501) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermissionWithContext(FSPermissionChecker.java:525) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:395) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1964) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1945) at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1904) at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:60) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3531) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:1173) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:750) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) at org.apache.hadoop.ipc.Client.call(Client.java:1529) at org.apache.hadoop.ipc.Client.call(Client.java:1426) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) at jdk.proxy2/jdk.proxy2.$Proxy11.mkdirs(Unknown Source) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$mkdirs$20(ClientNamenodeProtocolTranslatorPB.java:611) at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:611) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) at jdk.proxy2/jdk.proxy2.$Proxy12.mkdirs(Unknown Source) at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:2555) ... 
9 common frames omitted 21:37:31.683 [Thread-5] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.fs.FileContext$2@15fc336f] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.fs.FileContext.getAbstractFileSystem(FileContext.java:343) at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:465) at org.apache.hadoop.fs.FileContext.getFileContext(FileContext.java:442) at org.apache.hadoop.fs.FileContext.getLocalFSFileContext(FileContext.java:428) at org.apache.hadoop.mapred.LocalDistributedCacheManager.close(LocalDistributedCacheManager.java:268) at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:598) 21:37:32.626 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@77b9d0c7] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isUber(Job.java:1866) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1747) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.626 [main] INFO org.apache.hadoop.mapreduce.Job - Job job_local1106899704_0001 running in uber mode : false 21:37:32.628 [main] INFO org.apache.hadoop.mapreduce.Job - map 0% reduce 0% 21:37:32.628 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$6@3b0ee03a] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:730) at 
org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1759) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.629 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@796065aa] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1736) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.629 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@28a6301f] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1737) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.630 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$6@2c306a57] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.getTaskCompletionEvents(Job.java:730) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1759) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at 
cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.630 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@773e2eb5] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isComplete(Job.java:613) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1736) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.631 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@d8948cd] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isSuccessful(Job.java:625) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1763) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.631 [main] INFO org.apache.hadoop.mapreduce.Job - Job job_local1106899704_0001 failed with state FAILED due to: NA 21:37:32.631 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$8@7abe27bf] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.getCounters(Job.java:818) at org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1770) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1698) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 
21:37:32.651 [main] INFO org.apache.hadoop.mapreduce.Job - Counters: 0 21:37:32.651 [main] DEBUG org.apache.hadoop.security.UserGroupInformation - PrivilegedAction [as: С (auth:SIMPLE)][action: org.apache.hadoop.mapreduce.Job$1@2679311f] java.lang.Exception: null at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1950) at org.apache.hadoop.mapreduce.Job.updateStatus(Job.java:329) at org.apache.hadoop.mapreduce.Job.isSuccessful(Job.java:625) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1710) at cn.itcast.mr.dedup.MatrixMultiplication.main(MatrixMultiplication.java:128) 21:37:32.653 [shutdown-hook-0] DEBUG org.apache.hadoop.fs.FileSystem - FileSystem.close() by method: org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1530)); Key: (С (auth:SIMPLE))@hdfs://192.168.88.101:8020; URI: hdfs://192.168.88.101:8020; Object Identity Hash: 2e075efe 21:37:32.653 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - stopping client from cache: Client-e9ac678cebb441d58dd3dc3f8f54b798 21:37:32.654 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - removing client from cache: Client-e9ac678cebb441d58dd3dc3f8f54b798 21:37:32.654 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - stopping actual client because no more references remain: Client-e9ac678cebb441d58dd3dc3f8f54b798 21:37:32.654 [shutdown-hook-0] DEBUG org.apache.hadoop.ipc.Client - Stopping client 21:37:32.655 [IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С: closed 21:37:32.655 [IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С] DEBUG org.apache.hadoop.ipc.Client - IPC Client (1759899303) connection to xxjdxnj/192.168.88.101:8020 from С: stopped, remaining connections 0 21:37:32.655 [shutdown-hook-0] DEBUG org.apache.hadoop.fs.FileSystem - FileSystem.close() by method: 
org.apache.hadoop.fs.FilterFileSystem.close(FilterFileSystem.java:529)); Key: (С (auth:SIMPLE))@file://; URI: file:///; Object Identity Hash: 2a38dfe6 21:37:32.655 [shutdown-hook-0] DEBUG org.apache.hadoop.fs.FileSystem - FileSystem.close() by method: org.apache.hadoop.fs.RawLocalFileSystem.close(RawLocalFileSystem.java:895)); Key: null; URI: file:///; Object Identity Hash: 6f3a54c5 21:37:32.656 [shutdown-hook-0] DEBUG org.apache.hadoop.hdfs.KeyProviderCache - Invalidating all cached KeyProviders. 21:37:32.656 [Thread-1] DEBUG org.apache.hadoop.util.ShutdownHookManager - Completed shutdown in 0.004 seconds; Timeouts: 0 21:37:32.664 [Thread-1] DEBUG org.apache.hadoop.util.ShutdownHookManager - ShutdownHookManager completed shutdown. Process finished with exit code 1
06-22
评论 5
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值