[20190721]12CR2 max_idle_time 2.txt

本文详细介绍了 Oracle 数据库中 MAX_IDLE_TIME 参数的作用与用法,包括其默认值、修改方式及范围,并通过两个测试案例展示了该参数如何影响会话的空闲时间和事务的处理。

[20190721]12CR2 max_idle_time 2.txt

--//昨天测试max_idle_time,我自己有点不理解oracle为什么会加入这样的参数。
--//首先这个参数仅仅能在system级别上设置,而不能在session级别设置,缺乏灵活性。

--//查看官方文档如下:
https://docs.oracle.com/en/database/oracle/oracle-database/18/refrn/MAX_IDLE_TIME.html#GUID-9E26A81D-D99E-4EA8-88DE-77AF68482A20

1.184 MAX_IDLE_TIME

MAX_IDLE_TIME specifies the maximum number of minutes that a session can be idle. After that point, the session is
automatically terminated.

Property                Description
----------------------------------------------
Parameter type         Integer
Default value          0
Modifiable             ALTER SYSTEM
Modifiable in a PDB    Yes
Range of values        0 to the maximum integer. The value of 0 indicates that there is no limit.
Basic                  No
Oracle RAC             Different instances can use different values.
----------------------------------------------
--//补充测试有事务的情况。

1.环境:
SCOTT@test01p> @ ver1
PORT_STRING                    VERSION        BANNER                                                                               CON_ID
------------------------------ -------------- -------------------------------------------------------------------------------- ----------
IBMPC/WIN_NT64-9.1.0           12.2.0.1.0     Oracle Database 12c Enterprise Edition Release 12.2.0.1.0 - 64bit Production              0

SCOTT@test01p> alter system set max_idle_time=1 scope=memory;
System altered.

--//退出再进入。

2.测试一:
SCOTT@test01p> select sysdate from dual ;

SYSDATE
-------------------
2019-07-21 07:35:28

--//等30秒后。中间乱按一些字符再删除(或者直接输入以上命令),等2分钟之后执行(不要超过2分30秒执行):

SCOTT@test01p> select sysdate from dual ;
SYSDATE
-------------------
2019-07-21 07:37:33

--//可以发现并没有断开。继续在界面上只点击鼠标按钮(不做键盘输入),等2分钟之后执行:

SCOTT@test01p> select sysdate from dual ;
select sysdate from dual
*
ERROR at line 1:
ORA-03113: end-of-file on communication channel
Process ID: 2604
Session ID: 251 Serial number: 62541

--//可见计时是从有键盘输入算起的。空闲时间最少要达到2*MAX_IDLE_TIME才会终止连接。

3.测试二,如果有事务情况如下:
SCOTT@test01p> create table t as select level id from dual connect by level<=10;
Table created.

SCOTT@test01p> update t set id=id+1;
10 rows updated.

SCOTT@test01p> select sysdate from dual ;
SYSDATE
-------------------
2019-07-21 07:49:17

SCOTT@test01p> host sleep 120

SCOTT@test01p> select sysdate from dual ;
select sysdate from dual
*
ERROR at line 1:
ORA-03113: end-of-file on communication channel
Process ID: 908
Session ID: 181 Serial number: 31136

--//重新登录:
SCOTT@test01p> select * from t;
        ID
----------
         1
         2
         3
         4
         5
         6
         7
         8
         9
        10
10 rows selected.
--//事务回滚了。

来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/267265/viewspace-2651245/,如需转载,请注明出处,否则将追究法律责任。

转载于:http://blog.itpub.net/267265/viewspace-2651245/

/*
 * STM32F10x experiment: sine-modulated PWM output on TIM3_CH2 (remapped to
 * PB5), PWM period/width capture on TIM4_CH1/CH2 (PB6), 1 ms time bases from
 * TIM6 and SysTick, key inputs on EXTI3/EXTI4 (PE3/PE4), an LED on PE5, and a
 * USART1 (115200 8N1) command interface accepting "fsine=<f>\r" and
 * "fflash=<f>\r".
 *
 * NOTE(review): this block is a concatenation of several translation units
 * (main.c, my_usart1_config.c, time_init.c, key.c, systick config, my_usart1.c)
 * that were pasted together; the repeated #include groups mark the original
 * file boundaries and are kept as-is.
 */
#include "stm32f10x_gpio.h"
#include "stm32f10x_rcc.h"
#include "system_stm32f10x.h"
#include "stm32f10x_tim.h"
#include "stm32f10x.h"
#include "stm32f10x_it.h"
#include "misc.h"
#include "key.h"
#include "time_init.h"
#include "my_usart1.h"
#include "my_usart1_config.h"
#include "systick.h"
#define pi 3.1415926
#include <math.h>
#include <string.h>     /* string helpers */
#include <stdio.h>      /* sprintf */
#include "bit_band.h"

/* Millisecond counters and set-points shared with the TIM6/SysTick/EXTI ISRs
 * (presumably incremented there -- the ISRs are not visible in this file). */
__IO uint32_t TIM6_ms_nums,TIM6_ms_setnums,SysTick_ms_nums,period,width,SysTick_ms_setnums,kf1,kf2;
float T_sine,ratio_in,T_nums,ratio_out;
extern float fsine,fflash;      /* parsed from USART1 commands (defined below) */
#define ReceiveByInterrupt 1    /* 1 = receive USART1 data in the ISR, 0 = polling */
u8 Usart1_flag=0;               /* set by the USART1 ISR when a frame has arrived */

/*
 * Program entry point: configure all peripherals, then loop generating the
 * sine-shaped duty cycle, measuring the captured PWM, blinking the LED and
 * servicing USART1 commands.
 */
int main(void)
{
    NVIC_Conf_Forkey_OnRegister();
    u16 my_time3_psc=0,my_time3_arr=35999;      /* TIM3: PWM output timebase  */
    u16 my_time4_psc=0,my_time4_arr=60000;      /* TIM4: PWM input capture    */
    Time3_PwmoutCNF_OnReg(my_time3_arr,my_time3_psc);   /* TIM3 PWM output     */
    Time4_PwmdetectCNF_OnReg(my_time4_arr,my_time4_psc);/* TIM4 input capture  */
    TIM6_Conf_OnRegister();                     /* TIM6 1 ms timebase          */
    NVIC_Conf_ForTIm6_OnRegister();             /* TIM6 interrupt              */
    SysTick_Configuration();                    /* SysTick 1 ms timebase       */
    KEY_EXTI_Init_OnRegister();                 /* key EXTI + LED GPIO         */

    float f_sine = 0.2f;                        /* modulation frequency [Hz]   */
    T_sine=1/f_sine;                            /* modulation period [s]       */
    T_nums=((float) T_sine)/0.001;              /* period expressed in ms ticks*/

    USART1_Pin_Config_OnReg();                  /* USART1 pins + clocks        */
    USART1_Config_OnReg();                      /* USART1 115200 8N1           */
    if (ReceiveByInterrupt)
        Usart1_NVIC_Conf();                     /* RXNE interrupt mode         */

    SysTick_ms_setnums=5000;
    SysTick_ms_nums=0;
    TIM6_ms_setnums=5000;
    TIM6_ms_nums=0;
    SendString((u8 *)("Welcome to mycontroller!\n"));

    while(1)
    {
        if (kf2==1)     /* kf2: "run sweep" flag, toggled by a key ISR */
        {
            /* Wrap the ms counter at one modulation period. */
            if (TIM6_ms_nums>=T_nums) TIM6_ms_nums=0;
            /* Duty ratio follows 0.5 + 0.5*sin(2*pi*f*t), t in seconds. */
            ratio_in=0.5+0.5*sin(2*pi*f_sine*TIM6_ms_nums*0.001);
            TIM3->CCR2=ratio_in*my_time3_arr;   /* apply PWM duty cycle        */
            period=TIM4->CCR1+1;                /* captured input period       */
            width=TIM4->CCR2+1;                 /* captured input pulse width  */
            ratio_out=((float) width/(float) period);

            char char_buff0[40];
            sprintf(char_buff0,"ratio_in received is:%.4f;\n",ratio_in);
            SendString((u8*) char_buff0);
            char char_buff[40];
            sprintf(char_buff,"ratio_out received is:%.4f;\n",ratio_out);
            SendString((u8*) char_buff);
        }

        /* kf1: "blink LED" flag; LED on for the first half of the period. */
        if (kf1==1)
        {
            if(SysTick_ms_nums<=(SysTick_ms_setnums/2)) { PEout(5)=1; }
            else                                        { PEout(5)=0; }
        }
        else
        {
            PEout(5)=0;
        }

        u8 temp1=0;
        /* Polling mode: RXNE (USART1->SR bit 5) set means a byte arrived. */
        if ((USART1->SR)&(0x01<<5) && !ReceiveByInterrupt)
        {
            ReceiveString();                    /* read one '\r'-terminated frame */
            temp1=ReceiveData_analysis();
        }
        /* Interrupt mode: the ISR collects the frame and raises Usart1_flag. */
        if(Usart1_flag==1 && ReceiveByInterrupt)
        {
            temp1=ReceiveData_analysis();
            Usart1_flag=0;                      /* frame handled, clear flag   */
        }
        if (temp1!=0)                           /* a command was recognized    */
        {
            switch(temp1)
            {
                case 1:                         /* "fsine=<f>" -> new modulation freq */
                {
                    char char_buff[40];
                    sprintf(char_buff,"fsine received is:%.4f;\n",fsine);
                    SendString((u8 *)char_buff);
                    f_sine=fsine;
                }
                break;
                case 2:                         /* "fflash=<f>" -> new blink freq */
                {
                    char char_buff[40];
                    sprintf(char_buff,"fflash received is:%.4f;\n",fflash);
                    SendString((u8 *) char_buff);
                    SysTick_ms_setnums=1000/fflash;
                }
                break;
                case 3:                         /* unrecognized command        */
                    SendString((u8 *)("Command is error!\n"));
                    break;
                default:
                    break;
            }
            temp1=0;
            EmptyRxBuffer(Max);                 /* clear RX buffer for next frame */
        }
    }
}

#include "stm32f10x_rcc.h"
#include "stm32f10x_gpio.h"
#include "stm32f10x_usart.h"
#include "my_usart1_config.h"
#include "misc.h"

/*
 * USART1 GPIO + clock init (library-function version).
 * Uses the default USART1 TX/RX pins PA9/PA10 (no AF remap); both GPIOA and
 * USART1 sit on APB2, so both clocks must be enabled.
 */
void USART1_Pin_Config_OnLibFunc(void)
{
    RCC_APB2PeriphClockCmd(RCC_APB2Periph_GPIOA|RCC_APB2Periph_USART1, ENABLE);
    GPIO_InitTypeDef GPIO_InitStructure;
    GPIO_InitStructure.GPIO_Pin=GPIO_Pin_9;                 /* USART1 TX */
    GPIO_InitStructure.GPIO_Mode=GPIO_Mode_AF_PP;           /* AF push-pull */
    GPIO_InitStructure.GPIO_Speed=GPIO_Speed_50MHz;
    GPIO_Init(GPIOA,&GPIO_InitStructure);
    GPIO_InitStructure.GPIO_Pin=GPIO_Pin_10;                /* USART1 RX */
    GPIO_InitStructure.GPIO_Mode=GPIO_Mode_IN_FLOATING;     /* floating input */
    GPIO_Init(GPIOA,&GPIO_InitStructure);
}

/* USART1 GPIO + clock init (direct-register version): PA9 AF push-pull 50 MHz,
 * PA10 floating input. */
void USART1_Pin_Config_OnReg(void)
{
    RCC->APB2ENR |=(0x01<<2);   /* GPIOA clock enable  */
    RCC->APB2ENR |=(0x01<<14);  /* USART1 clock enable */
    GPIOA->CRH |=(0x0B<<4);     /* PA9: CNF/MODE=1011, AF push-pull, 50 MHz */
    GPIOA->CRH |=(0x04<<8);     /* PA10: CNF/MODE=0100, floating input      */
}

/* USART1 init, library-function version: 115200 baud, 8 data bits, 1 stop
 * bit, no parity, no flow control, TX+RX enabled. */
void USART1_Config_OnLibFunc(void)
{
    USART_InitTypeDef USART_InitStructure;
    USART_InitStructure.USART_BaudRate=115200;
    USART_InitStructure.USART_WordLength=USART_WordLength_8b;
    USART_InitStructure.USART_StopBits=USART_StopBits_1;
    USART_InitStructure.USART_Parity=USART_Parity_No;
    USART_InitStructure.USART_HardwareFlowControl=USART_HardwareFlowControl_None;
    USART_InitStructure.USART_Mode=USART_Mode_Rx|USART_Mode_Tx;
    USART_Init(USART1, &USART_InitStructure);
    USART_Cmd(USART1,ENABLE);
}

/* USART1 init, direct-register version (same 115200 8N1 configuration;
 * assumes PCLK2 = 72 MHz). */
void USART1_Config_OnReg(void)
{
    USART1->BRR=72000000/115200;    /* baud rate divisor */
    USART1->CR1 &=~(0x01<<12);      /* M=0: 8 data bits */
    USART1->CR2 &=~(0x03<<12);      /* STOP=00: 1 stop bit */
    USART1->CR1 &=~(0x01<<10);      /* PCE=0: parity disabled */
    USART1->CR3 &=~(0x01<<9);       /* CTSE=0: no hardware flow control */
    USART1->CR1 |=(0x03<<2);        /* TE=RE=1: enable transmit + receive */
    USART1->CR1 |=(0x01<<13);       /* UE=1: enable USART */
}

/* Enable the USART1 RXNE interrupt in the peripheral and the NVIC. */
void Usart1_NVIC_Conf(void)
{
    USART_ITConfig(USART1,USART_IT_RXNE,ENABLE);
    /* USART_ITConfig(USART1,USART_IT_IDLE,ENABLE); */
    NVIC_PriorityGroupConfig(NVIC_PriorityGroup_1);
    /* NOTE(review): this priority grouping conflicts with the groupings
     * written to SCB->AIRCR elsewhere in this project -- verify which one is
     * intended to win. */
    NVIC_InitTypeDef Usart1ConfStrut;
    Usart1ConfStrut.NVIC_IRQChannel=USART1_IRQn;
    Usart1ConfStrut.NVIC_IRQChannelPreemptionPriority=0;
    Usart1ConfStrut.NVIC_IRQChannelSubPriority=1;
    Usart1ConfStrut.NVIC_IRQChannelCmd=ENABLE;
    NVIC_Init(&Usart1ConfStrut);
}

#include "time_init.h"
#include "stm32f10x_rcc.h"
#include "stm32f10x.h"
#include "stm32f10x_gpio.h"
#include "stm32f10x_tim.h"

/*
 * TIM3 PWM output on CH2, remapped to PB5 (partial remap), PWM mode 1,
 * active high, preload enabled.
 * arr/psc: auto-reload and prescaler values.
 */
void Time3_PwmoutCNF_OnReg(u16 arr,u16 psc)
{
    RCC->APB2ENR |=0x01;            /* AFIO clock (needed for remap) */
    RCC->APB2ENR |=0x01<<3;         /* GPIOB clock */
    RCC->APB1ENR |=0x01<<1;         /* TIM3 clock  */
    GPIOB->CRL &=~(0x0F<<(4*5));
    GPIOB->CRL |=(0x0B<<(4*5));     /* PB5: AF push-pull, 50 MHz */
    AFIO->MAPR &=~(0x03<<10);
    AFIO->MAPR |=(0x02<<10);        /* partial remap: TIM3_CH2 -> PB5 */
    TIM3->CR1 |=(0x01<<7);          /* ARPE=1: ARR is buffered */
    TIM3->PSC=psc;
    TIM3->ARR=arr;
    TIM3->CCER &=~(0x01<<4);        /* disable CH2 output while configuring */
    TIM3->CCMR1 &=~(0xFF<<1*8);
    TIM3->CCMR1 |=(0x60<<1*8);      /* CC2 as output, OC2M=110 (PWM mode 1) */
    TIM3->CCER |=(0x01<<4);         /* CC2E=1: enable CH2 output */
    TIM3->CCER &=~(0x01<<5);        /* CC2P=0: active high */
    TIM3->CCMR1 |=(0x01<<7);        /* OC2PE=1: CCR2 preload enabled */
    TIM3->CR1 |=(0x01<<0);          /* CEN=1: start TIM3 */
}

/*
 * TIM4 PWM-input measurement on PB6 (TI1): IC1 rising edge captures the
 * period, IC2 (mapped to TI1, falling edge) captures the pulse width; the
 * counter is reset on each rising edge via slave reset mode on TI1FP1.
 */
void Time4_PwmdetectCNF_OnReg(u16 arr,u16 psc)
{
    RCC->APB1ENR |=(0x01<<2);       /* TIM4 clock  */
    RCC->APB2ENR |=(0X01<<0);       /* AFIO clock  */
    RCC->APB2ENR |=(0X01<<3);       /* GPIOB clock */
    /* PB6 as floating input (reset CNF is already 01; only OR needed). */
    GPIOB->CRL |=(0x04<<(4*6));
    TIM4->CR1 |=(0x01<<7);          /* ARPE=1, up-counting, no clock division */
    TIM4->ARR=arr;
    TIM4->PSC=psc;
    /* Channel 1: IC1 <- TI1, no filter, no prescaler, rising edge. */
    TIM4->CCMR1 &=~(0x01<<1);
    TIM4->CCMR1 |=0x01;             /* CC1S=01: IC1 mapped to TI1 */
    TIM4->CCMR1 &=~(0x0F<<4);       /* IC1F=0000: no input filter */
    TIM4->CCMR1 &=~(0x03<<2);       /* IC1PSC=00: no prescaler    */
    TIM4->CCER &=~(0x01<<1);        /* CC1P=0: capture on rising edge */
    /* Channel 2: IC2 <- TI1, no filter, no prescaler, falling edge. */
    TIM4->CCMR1 &=~(0x01<<8);
    TIM4->CCMR1 |=(0x01<<9);        /* CC2S=10: IC2 mapped to TI1 */
    TIM4->CCMR1 &=~(0x0F<<12);      /* IC2F=0000 (fixed: original cleared the
                                       IC1F bits <<4 here by copy-paste)      */
    TIM4->CCMR1 &=~(0x03<<10);      /* IC2PSC=00 (fixed: original cleared the
                                       IC1PSC bits <<2 here by copy-paste)    */
    TIM4->CCER |=(0x01<<5);         /* CC2P=1: capture on falling edge */
    TIM4->SMCR |=(0x05<<4);         /* TS=101: trigger input TI1FP1 */
    TIM4->SMCR |=0x04;              /* SMS=100: slave reset mode    */
    TIM4->CCER |=0x01;              /* CC1E=1: enable capture 1 */
    TIM4->CCER |=(0x01<<4);         /* CC2E=1: enable capture 2 */
    TIM4->CR1|=0x01;                /* CEN=1: start TIM4 */
}

/* TIM6 basic timer: 72 MHz / (999+1) = 72 kHz tick, ARR=72 -> update (and
 * interrupt) every 1 ms. */
void TIM6_Conf_OnRegister(void)
{
    RCC_APB1PeriphClockCmd(RCC_APB1Periph_TIM6,ENABLE);
    TIM6->PSC=999;
    TIM6->ARR=72;
    TIM6->SR=0;     /* clear UIF (fixed: UIF is rc_w0, so the original
                       `TIM6->SR=0x01` had no clearing effect)            */
    TIM6->DIER=0x01;/* UIE=1: update interrupt enable */
    TIM6->CR1=0x85; /* ARPE=1, URS=1, CEN=1 */
}

/* Enable the TIM6 interrupt (IRQ 54) in the NVIC and set its priority. */
void NVIC_Conf_ForTIm6_OnRegister(void)
{
    /* NOTE(review): writes to SCB->AIRCR are ignored unless VECTKEY 0x05FA
     * is written to bits [31:16] in the same access -- this priority-group
     * write likely has no effect; verify against the Cortex-M3 manual. */
    SCB->AIRCR |=(0x07<<8);
    NVIC->IPR[54]=0x70;             /* TIM6 priority */
    NVIC->ISER[1]=(0x01<<22);       /* enable IRQ 32+22 = 54 (TIM6) */
}

#include "key.h"
#include "misc.h"
#include "stm32f10x_gpio.h"
#include "stm32f10x_exti.h"
#include "stm32f10x_rcc.h"

/* NVIC setup for the key EXTI lines (IRQ 9 = EXTI3, IRQ 10 = EXTI4). */
void NVIC_Conf_Forkey_OnRegister(void)
{
    /* NOTE(review): same VECTKEY caveat as above -- these AIRCR read-modify-
     * write accesses omit the 0x05FA key and are likely ignored. */
    SCB->AIRCR &=~(0x07<<8);
    SCB->AIRCR |=(0x06<<8);
    NVIC->IPR[2] &=~(0x0F<<12);
    NVIC->IPR[2] |=(0x09<<12);      /* EXTI3 priority */
    NVIC->IPR[2] &=~(0x0F<<20);
    NVIC->IPR[2] |=(0x09<<20);      /* EXTI4 priority */
    NVIC->ISER[0] |=(0x03<<9);      /* enable IRQ 9 and 10 */
}

/* Keys on PE3/PE4 (pull-up inputs, falling-edge EXTI), LED on PE5 (push-pull
 * output). AFIO clock is mandatory for EXTI source selection. */
void KEY_EXTI_Init_OnRegister(void)
{
    RCC_APB2PeriphClockCmd(RCC_APB2Periph_GPIOE,ENABLE);
    RCC_APB2PeriphClockCmd(RCC_APB2Periph_AFIO, ENABLE);
    GPIOE->CRL &=~(0x0F<<4*5);
    GPIOE->CRL |=(0x03<<4*5);       /* PE5: push-pull output, 50 MHz */
    GPIOE->CRL &=~(0xFF<<4*3);
    GPIOE->CRL|=(0x88<<4*3);        /* PE3/PE4: input with pull-up/down */
    GPIOE->ODR|=(0x3<<3);           /* ODR=1 selects pull-up on PE3/PE4 */
    AFIO->EXTICR[0] &=~(0x0F<<12);
    AFIO->EXTICR[0] |=(0x04<<12);   /* EXTI3 source = port E */
    EXTI->FTSR |=(0x01<<3);         /* falling-edge trigger */
    EXTI->EMR &=~(0x01<<3);         /* mask event request   */
    EXTI->IMR |=(0x01<<3);          /* unmask interrupt     */
    AFIO->EXTICR[1] &=~(0x0F);
    AFIO->EXTICR[1] |=(0x04);       /* EXTI4 source = port E */
    EXTI->FTSR |=(0x01<<4);
    EXTI->EMR &=~(0x01<<4);
    EXTI->IMR |=(0x01<<4);
}

#include "stm32f10x_tim.h"

/* SysTick: processor clock (HCLK), reload 71999 -> 1 ms tick at 72 MHz,
 * counter and its interrupt enabled. */
void SysTick_Configuration(void)
{
    SysTick->CTRL|=0x1<<2;  /* CLKSOURCE=1: use HCLK */
    SysTick->CTRL|=0x03;    /* ENABLE=1, TICKINT=1   */
    SysTick->LOAD|=71999;   /* NOTE(review): `|=` only works here because the
                               reset value of LOAD is 0; `=` is the intent */
}

#include "stm32f10x_usart.h"
#include <string.h>
#include "stdlib.h"     /* atof(), malloc()/free() */
#include "stdio.h"
#include "my_usart1.h"

/* USART1 communication buffers and state ('Max' comes from my_usart1.h). */
u8 RxBuffer[Max];
u8 TxBuffer[Max];
u8 RxCount=0;           /* bytes received so far / index of the '\r' terminator */
u8 TxCount=0;
float fsine,fflash;     /* last values parsed from fsine=/fflash= commands */
float RxBufferToFloat(char *p1);

/*
 * Send one byte over USART1 by polling.
 * Returns ERR on transmit timeout, OK otherwise (TXE = transmit-empty flag).
 */
TXRXstat Send1Byte(u8 dat)
{
    vu32 cnt=0;         /* timeout counter */
    USART_SendData(USART1,dat);
    while(USART_GetFlagStatus(USART1,USART_FLAG_TXE)==RESET)
    {
        cnt++;
        if(cnt>100000) return ERR;  /* give up after ~100k polls */
    }
    return OK;
}

/* Blocking receive of one byte over USART1 (polls RXNE). */
u8 ReceivelByte(void)
{
    while(USART_GetFlagStatus(USART1,USART_FLAG_RXNE)==RESET){}
    return (USART_ReceiveData(USART1));
}

/*
 * Receive one frame into RxBuffer, terminated by '\r' (the '\r' itself is
 * stored at RxBuffer[RxCount]). The index is clamped at Max-1 so an over-long
 * frame can no longer overflow the buffer (fix: the original incremented
 * RxCount without any bound).
 */
void ReceiveString(void)
{
    while(1)
    {
        RxBuffer[RxCount]=ReceivelByte();
        if(RxBuffer[RxCount]=='\r') break;
        if(RxCount<Max-1) RxCount++;    /* bounded (was unbounded) */
    }
}

/*
 * Classify the received frame.
 * Returns 1 for an "fsine=" command, 2 for "fflash=", 3 for anything else;
 * RxCount is reset in every branch so the next frame starts at index 0.
 */
u8 ReceiveData_analysis(void)
{
    if(strstr((char*) RxBuffer,"fsine")!=NULL)      /* look for "fsine" */
    {
        fsine= RxBufferToFloat((char *)RxBuffer);   /* parse value after '=' */
        RxCount=0;
        return 1;
    }
    else if(strstr((char*) RxBuffer,"fflash")!=NULL)
    {
        fflash= RxBufferToFloat((char *)RxBuffer);
        RxCount=0;
        return 2;
    }
    else
    {
        RxCount=0;
        return 3;       /* unrecognized command */
    }
}

/* Send a NUL-terminated string over USART1, one byte at a time. */
void SendString(u8 *Message)
{
    while(*Message!='\0')
        Send1Byte(*Message++);
}

/* Zero the first 'len' bytes of the receive buffer. */
void EmptyRxBuffer(u8 len)
{
    u8 i;
    for(i=0;i<len;i++)
        RxBuffer[i]=0;
}

/*
 * Extract the floating-point value following the first '=' in p1, up to the
 * '\r' terminator (frame layout: "name=value\r", '\r' at index RxCount).
 * Returns 0.0f if no '=' is found or allocation fails.
 *
 * Fixes vs. original: the temporary buffer is now one byte larger and
 * NUL-terminated before atof() (atof on an unterminated buffer is undefined
 * behavior), malloc() is checked, the copy loop is bounded, and a frame
 * without '=' no longer loops forever past the end of the buffer.
 */
float RxBufferToFloat(char *p1)
{
    int i=0;
    while(1)
    {
        if(p1[i]=='=')                  /* found the separator */
        {
            int m=i+1,k=0;
            char *freq=(char*)malloc(RxCount-m+1);  /* +1 for '\0' */
            if(freq==NULL) return 0.0f;             /* allocation failed */
            i++;
            /* Copy digits up to '\r', bounded by the allocated size. */
            while(p1[i]!='\r' && k<(int)(RxCount-m))
            {
                freq[k]=p1[i];
                i++;
                k++;
            }
            freq[k]='\0';               /* terminate before atof() */
            float f1=atof(freq);
            free(freq);
            return f1;
        }
        else
        {
            if(p1[i]=='\0') return 0.0f;/* no '=' in frame: bail out */
            i++;
        }
    }
}
/* (Question pasted with the original code: what are the purpose, principle,
 * and method of this STM32 embedded experiment?) */
11-22
//TIMER3中断 1ms一次 void OS_TASK_IRQHandler(void) { static uint8_t s_state = 0; static uint32_t s_outFrequency = 0; static uint32_t s_timeCnt = 0; //差值频率 double diff = tMasterBoardData.Flash_StopFreq - tMasterBoardData.Flash_StartFreq; // u8 i = 0; if (RESET != TIM_GetITStatus(OS_TASK_TIM,TIM_IT_Update))//检查TIM3更新中断发生与否 { TIM_ClearITPendingBit(OS_TASK_TIM,TIM_IT_Update); /* 全局运行时间每1ms增1 */ g_iRunTime++; if (g_iRunTime == 0x7FFFFFFF) /* 这个变量是 int32_t 类型,最大数为 0x7FFFFFFF */ { g_iRunTime = 0; } switch (s_state) { /* 递增环节 */ case 0: //频率改变,占空比不变 if(tMasterBoardData.Flash_SweepMode & (1<<0)) { if(tMasterBoardData.Flash_RiseTime > g_iRunTime) { s_outFrequency = tMasterBoardData.Flash_StartFreq + diff * g_iRunTime / tMasterBoardData.Flash_RiseTime; tMasterBoardData.ActualFrequenct = s_outFrequency; Set_TIM_PWM_Update(TIM1,4,tMasterBoardData.ActualFrequenct,tMasterBoardData.Flash_StartDuty*100); } else { g_iRunTime = 0; s_state = 1; } } break; /* 保持环节 */ case 1: if(tMasterBoardData.Flash_SweepMode & (1<<0)) { if(tMasterBoardData.Flash_HodeTime > g_iRunTime) { s_outFrequency = tMasterBoardData.Flash_StopFreq; tMasterBoardData.ActualFrequenct = s_outFrequency; Set_TIM_PWM_Update(TIM1,4,tMasterBoardData.ActualFrequenct,tMasterBoardData.Flash_StartDuty*100); } else { g_iRunTime = 0; s_state = 2; } } break; /* 递减环节 */ case 2: if(tMasterBoardData.Flash_SweepMode & (1<<0)) { if(tMasterBoardData.Flash_FallTime > g_iRunTime) { s_outFrequency = tMasterBoardData.Flash_StopFreq - diff * g_iRunTime / tMasterBoardData.Flash_FallTime; tMasterBoardData.ActualFrequenct = s_outFrequency; Set_TIM_PWM_Update(TIM1,4,tMasterBoardData.ActualFrequenct,tMasterBoardData.Flash_StartDuty*100); } else { g_iRunTime = 0; s_state = 0; } } break; default: break; } } }
08-02
连接主机... 连接主机成功 Last login: Mon Dec 29 11:32:00 2025 from 192.168.81.1 [ma@master ~]$ su 密码: [root@master ma]# [root@master ma]# cd /export/server/kafka [root@master kafka]# ./bin/kafka-server-start.sh config/server.properties [2025-12-29 11:33:50,672] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) [2025-12-29 11:33:51,185] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) [2025-12-29 11:33:51,250] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) [2025-12-29 11:33:51,256] INFO starting (kafka.server.KafkaServer) [2025-12-29 11:33:51,257] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) [2025-12-29 11:33:51,279] INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. (kafka.zookeeper.ZooKeeperClient) [2025-12-29 11:33:51,289] INFO Client environment:zookeeper.version=3.5.8-f439ca583e70862c3068a1f2a7d4d068eec33315, built on 05/04/2020 15:53 GMT (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,289] INFO Client environment:host.name=master (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,289] INFO Client environment:java.version=1.8.0_241 (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,289] INFO Client environment:java.vendor=Oracle Corporation (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,289] INFO Client environment:java.home=/export/server/jdk/jre (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client 
environment:java.class.path=/export/server/kafka/bin/../libs/activation-1.1.1.jar:/export/server/kafka/bin/../libs/aopalliance-repackaged-2.5.0.jar:/export/server/kafka/bin/../libs/argparse4j-0.7.0.jar:/export/server/kafka/bin/../libs/audience-annotations-0.5.0.jar:/export/server/kafka/bin/../libs/commons-cli-1.4.jar:/export/server/kafka/bin/../libs/commons-lang3-3.8.1.jar:/export/server/kafka/bin/../libs/connect-api-2.6.0.jar:/export/server/kafka/bin/../libs/connect-basic-auth-extension-2.6.0.jar:/export/server/kafka/bin/../libs/connect-file-2.6.0.jar:/export/server/kafka/bin/../libs/connect-json-2.6.0.jar:/export/server/kafka/bin/../libs/connect-mirror-2.6.0.jar:/export/server/kafka/bin/../libs/connect-mirror-client-2.6.0.jar:/export/server/kafka/bin/../libs/connect-runtime-2.6.0.jar:/export/server/kafka/bin/../libs/connect-transforms-2.6.0.jar:/export/server/kafka/bin/../libs/hk2-api-2.5.0.jar:/export/server/kafka/bin/../libs/hk2-locator-2.5.0.jar:/export/server/kafka/bin/../libs/hk2-utils-2.5.0.jar:/export/server/kafka/bin/../libs/jackson-annotations-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-core-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-databind-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-dataformat-csv-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-datatype-jdk8-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-jaxrs-base-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-jaxrs-json-provider-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-module-jaxb-annotations-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-module-paranamer-2.10.2.jar:/export/server/kafka/bin/../libs/jackson-module-scala_2.12-2.10.2.jar:/export/server/kafka/bin/../libs/jakarta.activation-api-1.2.1.jar:/export/server/kafka/bin/../libs/jakarta.annotation-api-1.3.4.jar:/export/server/kafka/bin/../libs/jakarta.inject-2.5.0.jar:/export/server/kafka/bin/../libs/jakarta.ws.rs-api-2.1.5.jar:/export/server/kafka/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/e
xport/server/kafka/bin/../libs/javassist-3.22.0-CR2.jar:/export/server/kafka/bin/../libs/javassist-3.26.0-GA.jar:/export/server/kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/export/server/kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/export/server/kafka/bin/../libs/jaxb-api-2.3.0.jar:/export/server/kafka/bin/../libs/jersey-client-2.28.jar:/export/server/kafka/bin/../libs/jersey-common-2.28.jar:/export/server/kafka/bin/../libs/jersey-container-servlet-2.28.jar:/export/server/kafka/bin/../libs/jersey-container-servlet-core-2.28.jar:/export/server/kafka/bin/../libs/jersey-hk2-2.28.jar:/export/server/kafka/bin/../libs/jersey-media-jaxb-2.28.jar:/export/server/kafka/bin/../libs/jersey-server-2.28.jar:/export/server/kafka/bin/../libs/jetty-client-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-continuation-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-http-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-io-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-security-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-server-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-servlet-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-servlets-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jetty-util-9.4.24.v20191120.jar:/export/server/kafka/bin/../libs/jopt-simple-5.0.4.jar:/export/server/kafka/bin/../libs/kafka_2.12-2.6.0.jar:/export/server/kafka/bin/../libs/kafka_2.12-2.6.0-sources.jar:/export/server/kafka/bin/../libs/kafka-clients-2.6.0.jar:/export/server/kafka/bin/../libs/kafka-log4j-appender-2.6.0.jar:/export/server/kafka/bin/../libs/kafka-streams-2.6.0.jar:/export/server/kafka/bin/../libs/kafka-streams-examples-2.6.0.jar:/export/server/kafka/bin/../libs/kafka-streams-scala_2.12-2.6.0.jar:/export/server/kafka/bin/../libs/kafka-streams-test-utils-2.6.0.jar:/export/server/kafka/bin/../libs/kafka-tools-2.6.0.jar:/export/server/kafka/bin/../libs/log4j-1.2.17.jar:/export/server/kafka/bin/../libs/
lz4-java-1.7.1.jar:/export/server/kafka/bin/../libs/maven-artifact-3.6.3.jar:/export/server/kafka/bin/../libs/metrics-core-2.2.0.jar:/export/server/kafka/bin/../libs/netty-buffer-4.1.50.Final.jar:/export/server/kafka/bin/../libs/netty-codec-4.1.50.Final.jar:/export/server/kafka/bin/../libs/netty-common-4.1.50.Final.jar:/export/server/kafka/bin/../libs/netty-handler-4.1.50.Final.jar:/export/server/kafka/bin/../libs/netty-resolver-4.1.50.Final.jar:/export/server/kafka/bin/../libs/netty-transport-4.1.50.Final.jar:/export/server/kafka/bin/../libs/netty-transport-native-epoll-4.1.50.Final.jar:/export/server/kafka/bin/../libs/netty-transport-native-unix-common-4.1.50.Final.jar:/export/server/kafka/bin/../libs/osgi-resource-locator-1.0.1.jar:/export/server/kafka/bin/../libs/paranamer-2.8.jar:/export/server/kafka/bin/../libs/plexus-utils-3.2.1.jar:/export/server/kafka/bin/../libs/reflections-0.9.12.jar:/export/server/kafka/bin/../libs/rocksdbjni-5.18.4.jar:/export/server/kafka/bin/../libs/scala-collection-compat_2.12-2.1.6.jar:/export/server/kafka/bin/../libs/scala-java8-compat_2.12-0.9.1.jar:/export/server/kafka/bin/../libs/scala-library-2.12.11.jar:/export/server/kafka/bin/../libs/scala-logging_2.12-3.9.2.jar:/export/server/kafka/bin/../libs/scala-reflect-2.12.11.jar:/export/server/kafka/bin/../libs/slf4j-api-1.7.30.jar:/export/server/kafka/bin/../libs/slf4j-log4j12-1.7.30.jar:/export/server/kafka/bin/../libs/snappy-java-1.1.7.3.jar:/export/server/kafka/bin/../libs/validation-api-2.0.1.Final.jar:/export/server/kafka/bin/../libs/zookeeper-3.5.8.jar:/export/server/kafka/bin/../libs/zookeeper-jute-3.5.8.jar:/export/server/kafka/bin/../libs/zstd-jni-1.4.4-7.jar (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) [2025-12-29 
11:33:51,290] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client environment:os.version=3.10.0-1160.71.1.el7.x86_64 (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,290] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,291] INFO Client environment:user.dir=/export/server/kafka (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,291] INFO Client environment:os.memory.free=977MB (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,291] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,291] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,293] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@15ff3e9e (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:51,299] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) [2025-12-29 11:33:51,306] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn) [2025-12-29 11:33:51,310] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) [2025-12-29 11:33:51,315] INFO Opening socket connection to server localhost/127.0.0.1:2181. 
Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) [2025-12-29 11:33:51,319] INFO Socket connection established, initiating session, client: /127.0.0.1:54580, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn) [2025-12-29 11:33:51,337] INFO Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x100005654de0000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) [2025-12-29 11:33:51,341] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient) [2025-12-29 11:33:51,653] INFO Cluster ID = QvaJvRsMRZmudq1srV4Xyg (kafka.server.KafkaServer) [2025-12-29 11:33:51,729] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = PLAINTEXT://192.168.81.130:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 0 broker.id.generation.enable = true broker.rack = null client.quota.callback.class = null compression.type = producer connection.failed.authentication.delay.ms = 100 connections.max.idle.ms = 600000 connections.max.reauth.ms = 0 control.plane.listener.name = null controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 1 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.max.bytes = 57671680 fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 0 group.max.session.timeout.ms = 1800000 group.max.size = 
2147483647 group.min.session.timeout.ms = 6000 host.name = inter.broker.listener.name = null inter.broker.protocol.version = 2.6-IV0 kafka.metrics.polling.interval.secs = 10 kafka.metrics.reporters = [] leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL listeners = PLAINTEXT://192.168.81.130:9092 log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.max.compaction.lag.ms = 9223372036854775807 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /export/server/kafka/logs log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.downconversion.enable = true log.message.format.version = 2.6-IV0 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = -1 log.retention.check.interval.ms = 300000 log.retention.hours = 168 log.retention.minutes = null log.retention.ms = null log.roll.hours = 168 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 1073741824 log.segment.delete.delay.ms = 60000 max.connections = 2147483647 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1048588 metric.reporters = [] 
metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 1 num.recovery.threads.per.data.dir = 1 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks = -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 10080 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 1 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 30000 replica.selector.class = null replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.client.callback.handler.class = null sasl.enabled.mechanisms = [GSSAPI] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 
sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism.inter.broker.protocol = GSSAPI sasl.server.callback.handler.class = null security.inter.broker.protocol = PLAINTEXT security.providers = null socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2] ssl.endpoint.identification.algorithm = https ssl.engine.factory.class = null ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.principal.mapping.rules = DEFAULT ssl.protocol = TLSv1.2 ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 2 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false zookeeper.clientCnxnSocket = null zookeeper.connect = localhost:2181 zookeeper.connection.timeout.ms = 60000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 18000 zookeeper.set.acl = false zookeeper.ssl.cipher.suites = null zookeeper.ssl.client.enable = false zookeeper.ssl.crl.enable = false zookeeper.ssl.enabled.protocols = null zookeeper.ssl.endpoint.identification.algorithm = HTTPS zookeeper.ssl.keystore.location = 
null zookeeper.ssl.keystore.password = null zookeeper.ssl.keystore.type = null zookeeper.ssl.ocsp.enable = false zookeeper.ssl.protocol = TLSv1.2 zookeeper.ssl.truststore.location = null zookeeper.ssl.truststore.password = null zookeeper.ssl.truststore.type = null zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2025-12-29 11:33:51,752] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = PLAINTEXT://192.168.81.130:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 0 broker.id.generation.enable = true broker.rack = null client.quota.callback.class = null compression.type = producer connection.failed.authentication.delay.ms = 100 connections.max.idle.ms = 600000 connections.max.reauth.ms = 0 control.plane.listener.name = null controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 1 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.max.bytes = 57671680 fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 0 group.max.session.timeout.ms = 1800000 group.max.size = 2147483647 group.min.session.timeout.ms = 6000 host.name = inter.broker.listener.name = null inter.broker.protocol.version = 2.6-IV0 kafka.metrics.polling.interval.secs = 10 kafka.metrics.reporters = [] leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 listener.security.protocol.map = 
PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL listeners = PLAINTEXT://192.168.81.130:9092 log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.max.compaction.lag.ms = 9223372036854775807 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /export/server/kafka/logs log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.downconversion.enable = true log.message.format.version = 2.6-IV0 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = -1 log.retention.check.interval.ms = 300000 log.retention.hours = 168 log.retention.minutes = null log.retention.ms = null log.roll.hours = 168 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 1073741824 log.segment.delete.delay.ms = 60000 max.connections = 2147483647 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1048588 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 1 num.recovery.threads.per.data.dir = 1 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks 
= -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 10080 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 1 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 30000 replica.selector.class = null replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.client.callback.handler.class = null sasl.enabled.mechanisms = [GSSAPI] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism.inter.broker.protocol = GSSAPI sasl.server.callback.handler.class = null 
security.inter.broker.protocol = PLAINTEXT security.providers = null socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2] ssl.endpoint.identification.algorithm = https ssl.engine.factory.class = null ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.principal.mapping.rules = DEFAULT ssl.protocol = TLSv1.2 ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 2 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false zookeeper.clientCnxnSocket = null zookeeper.connect = localhost:2181 zookeeper.connection.timeout.ms = 60000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 18000 zookeeper.set.acl = false zookeeper.ssl.cipher.suites = null zookeeper.ssl.client.enable = false zookeeper.ssl.crl.enable = false zookeeper.ssl.enabled.protocols = null zookeeper.ssl.endpoint.identification.algorithm = HTTPS zookeeper.ssl.keystore.location = null zookeeper.ssl.keystore.password = null zookeeper.ssl.keystore.type = null zookeeper.ssl.ocsp.enable = false zookeeper.ssl.protocol = TLSv1.2 zookeeper.ssl.truststore.location = null zookeeper.ssl.truststore.password = null zookeeper.ssl.truststore.type = null zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2025-12-29 11:33:51,793] INFO 
[ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:51,794] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:51,795] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:51,829] INFO Loading logs from log dirs ArrayBuffer(/export/server/kafka/logs) (kafka.log.LogManager) [2025-12-29 11:33:51,831] INFO Skipping recovery for all logs in /export/server/kafka/logs since clean shutdown file was found (kafka.log.LogManager) [2025-12-29 11:33:51,843] INFO Loaded 0 logs in 14ms. (kafka.log.LogManager) [2025-12-29 11:33:51,862] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager) [2025-12-29 11:33:51,866] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager) [2025-12-29 11:33:52,346] INFO Awaiting socket connections on 192.168.81.130:9092. (kafka.network.Acceptor) [2025-12-29 11:33:52,397] INFO [SocketServer brokerId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer) [2025-12-29 11:33:52,425] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,427] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,427] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,428] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,444] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) [2025-12-29 11:33:52,514] INFO Creating /brokers/ids/0 (is it secure? 
false) (kafka.zk.KafkaZkClient) [2025-12-29 11:33:52,538] ERROR Error while creating ephemeral at /brokers/ids/0, node already exists and owner '72057932414386176' does not match current session '72057964828950528' (kafka.zk.KafkaZkClient$CheckedEphemeral) [2025-12-29 11:33:52,546] ERROR [KafkaServer id=0] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer) org.apache.zookeeper.KeeperException$NodeExistsException: KeeperErrorCode = NodeExists at org.apache.zookeeper.KeeperException.create(KeeperException.java:126) at kafka.zk.KafkaZkClient$CheckedEphemeral.getAfterNodeExists(KafkaZkClient.scala:1821) at kafka.zk.KafkaZkClient$CheckedEphemeral.create(KafkaZkClient.scala:1759) at kafka.zk.KafkaZkClient.checkedEphemeralCreate(KafkaZkClient.scala:1726) at kafka.zk.KafkaZkClient.registerBroker(KafkaZkClient.scala:95) at kafka.server.KafkaServer.startup(KafkaServer.scala:293) at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:44) at kafka.Kafka$.main(Kafka.scala:82) at kafka.Kafka.main(Kafka.scala) [2025-12-29 11:33:52,551] INFO [KafkaServer id=0] shutting down (kafka.server.KafkaServer) [2025-12-29 11:33:52,553] INFO [SocketServer brokerId=0] Stopping socket server request processors (kafka.network.SocketServer) [2025-12-29 11:33:52,556] INFO [SocketServer brokerId=0] Stopped socket server request processors (kafka.network.SocketServer) [2025-12-29 11:33:52,559] INFO [ReplicaManager broker=0] Shutting down (kafka.server.ReplicaManager) [2025-12-29 11:33:52,560] INFO [LogDirFailureHandler]: Shutting down (kafka.server.ReplicaManager$LogDirFailureHandler) [2025-12-29 11:33:52,561] INFO [LogDirFailureHandler]: Stopped (kafka.server.ReplicaManager$LogDirFailureHandler) [2025-12-29 11:33:52,562] INFO [LogDirFailureHandler]: Shutdown completed (kafka.server.ReplicaManager$LogDirFailureHandler) [2025-12-29 11:33:52,562] INFO [ReplicaFetcherManager on broker 0] shutting down (kafka.server.ReplicaFetcherManager) [2025-12-29 
11:33:52,565] INFO [ReplicaFetcherManager on broker 0] shutdown completed (kafka.server.ReplicaFetcherManager) [2025-12-29 11:33:52,565] INFO [ReplicaAlterLogDirsManager on broker 0] shutting down (kafka.server.ReplicaAlterLogDirsManager) [2025-12-29 11:33:52,566] INFO [ReplicaAlterLogDirsManager on broker 0] shutdown completed (kafka.server.ReplicaAlterLogDirsManager) [2025-12-29 11:33:52,566] INFO [ExpirationReaper-0-Fetch]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,627] INFO [ExpirationReaper-0-Fetch]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,627] INFO [ExpirationReaper-0-Fetch]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,629] INFO [ExpirationReaper-0-Produce]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,827] INFO [ExpirationReaper-0-Produce]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,827] INFO [ExpirationReaper-0-Produce]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,828] INFO [ExpirationReaper-0-DeleteRecords]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,828] INFO [ExpirationReaper-0-DeleteRecords]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,828] INFO [ExpirationReaper-0-DeleteRecords]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,828] INFO [ExpirationReaper-0-ElectLeader]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,829] INFO [ExpirationReaper-0-ElectLeader]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,829] INFO [ExpirationReaper-0-ElectLeader]: Shutdown completed 
(kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2025-12-29 11:33:52,836] INFO [ReplicaManager broker=0] Shut down completely (kafka.server.ReplicaManager) [2025-12-29 11:33:52,837] INFO Shutting down. (kafka.log.LogManager) [2025-12-29 11:33:52,877] INFO Shutdown complete. (kafka.log.LogManager) [2025-12-29 11:33:52,878] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient) [2025-12-29 11:33:52,983] INFO Session: 0x100005654de0000 closed (org.apache.zookeeper.ZooKeeper) [2025-12-29 11:33:52,983] INFO EventThread shut down for session: 0x100005654de0000 (org.apache.zookeeper.ClientCnxn) [2025-12-29 11:33:52,984] INFO [ZooKeeperClient Kafka server] Closed. (kafka.zookeeper.ZooKeeperClient) [2025-12-29 11:33:52,985] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:53,794] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:53,795] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:53,795] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:54,795] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:54,795] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:54,796] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:54,797] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:54,797] INFO [ThrottledChannelReaper-Request]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2025-12-29 11:33:54,798] INFO [SocketServer 
brokerId=0] Shutting down socket server (kafka.network.SocketServer) [2025-12-29 11:33:54,840] INFO [SocketServer brokerId=0] Shutdown completed (kafka.network.SocketServer) [2025-12-29 11:33:54,847] INFO [KafkaServer id=0] shut down completed (kafka.server.KafkaServer) [2025-12-29 11:33:54,848] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable) [2025-12-29 11:33:54,849] INFO [KafkaServer id=0] shutting down (kafka.server.KafkaServer) [root@master kafka]# 还有tstrap-server localhost:9092 [2025-12-29 11:34:02,707] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:02,836] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:02,940] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:03,144] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:03,548] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:04,353] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:05,362] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. 
(org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:06,270] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:07,176] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:08,387] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:09,597] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:10,806] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:11,715] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:12,823] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:13,932] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:15,039] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. 
(org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:16,247] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:17,255] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:18,463] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:19,471] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:20,581] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:21,590] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:22,701] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:23,908] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:24,714] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. 
(org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:25,824] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:27,032] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:28,239] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:29,246] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:30,354] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:31,262] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:32,268] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:33,183] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:34,090] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. 
(org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:35,198] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:36,306] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:37,213] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:38,422] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:39,630] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:40,536] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:41,444] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:42,352] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:43,359] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. 
(org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:44,467] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:45,475] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:46,583] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:47,489] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:48,697] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:49,708] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:50,615] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:51,725] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:52,934] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. 
(org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:53,943] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:55,151] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:56,261] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:57,469] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:58,378] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:34:59,585] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:35:00,490] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:35:01,397] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) [2025-12-29 11:35:02,404] WARN [AdminClient clientId=adminclient-1] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. 
(org.apache.kafka.clients.NetworkClient) Error while executing topic command : Call(callName=createTopics, deadlineMs=1766979302733, tries=1, nextAllowedTryMs=1766979302834) timed out at 1766979302734 after 1 attempt(s) [2025-12-29 11:35:02,738] ERROR org.apache.kafka.common.errors.TimeoutException: Call(callName=createTopics, deadlineMs=1766979302733, tries=1, nextAllowedTryMs=1766979302834) timed out at 1766979302734 after 1 attempt(s) Caused by: org.apache.kafka.common.errors.TimeoutException: Timed out waiting for a node assignment. (kafka.admin.TopicCommand$) [root@master kafka]# 000
最新发布
12-30
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符  | 博主筛选后可见
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值