netty-flush

DefaultChannelPipeline.flush()

flush() propagates down the pipeline to the head; HeadContext.flush(ctx) simply delegates to the channel's unsafe:

@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
     unsafe.flush();
 }
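
For orientation, a minimal application-side sketch (handler name and message are illustrative) of what triggers this path: write() only queues the message into the ChannelOutboundBuffer, and flush() travels down the pipeline until HeadContext delegates to unsafe.flush() as above.

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.CharsetUtil;

public class EchoHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // writeAndFlush() = write() (append to ChannelOutboundBuffer) + flush() (this code path)
        ctx.writeAndFlush(Unpooled.copiedBuffer("hello", CharsetUtil.UTF_8));
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        ctx.write(msg);   // only buffers the message
        ctx.flush();      // propagates to HeadContext.flush(ctx) shown above
    }
}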

AbstractChannel.AbstractUnsafe.flush()

Calls outboundBuffer.addFlush() and then flush0().

@Override
public final void flush() {
    assertEventLoop();

    ChannelOutboundBuffer outboundBuffer = this.outboundBuffer;
    if (outboundBuffer == null) {
        return;
    }

    outboundBuffer.addFlush();
    flush0();
}

ChannelOutboundBuffer.addFlush()

Walks the entries starting at unflushedEntry, incrementing flushed for each one.
If an entry's promise was already cancelled, the entry is cancelled here and the pending outbound byte count is decremented.


/**
* Add a flush to this {@link ChannelOutboundBuffer}. This means all previous added messages are marked as flushed
* and so you will be able to handle them.
*/
public void addFlush() {
	// There is no need to process all entries if there was already a flush before and no new messages
	// were added in the meantime.
	//
	// See https://github.com/netty/netty/issues/2577
	Entry entry = unflushedEntry;
	if (entry != null) {
	    if (flushedEntry == null) {
	        // there is no flushedEntry yet, so start with the entry
	        flushedEntry = entry;
	    }
	    do {
	        flushed ++;
	        if (!entry.promise.setUncancellable()) {
	            // Was cancelled so make sure we free up memory and notify about the freed bytes
	            int pending = entry.cancel();
	            decrementPendingOutboundBytes(pending, false, true);
	        }
	        entry = entry.next;
	    } while (entry != null);
	
	    // All flushed so reset unflushedEntry
	    unflushedEntry = null;
	}
}
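
To make the pointer shuffling concrete, a standalone toy model (not Netty code; field names mirror the real ones) of the three pointers addFlush() works with:

final class ToyOutboundBuffer {
    static final class Entry { final Object msg; Entry next; Entry(Object m) { msg = m; } }

    Entry flushedEntry;    // head of the entries already marked as flushed
    Entry unflushedEntry;  // head of the entries written but not yet flushed
    Entry tailEntry;       // last entry in the list
    int flushed;           // number of flushed (not yet removed) entries

    // roughly what write() does: append to the tail, track the first unflushed entry
    void addMessage(Object msg) {
        Entry e = new Entry(msg);
        if (tailEntry != null) {
            tailEntry.next = e;
        }
        tailEntry = e;
        if (unflushedEntry == null) {
            unflushedEntry = e;
        }
    }

    // mirrors addFlush(): everything queued so far becomes "flushed"
    void addFlush() {
        Entry entry = unflushedEntry;
        if (entry != null) {
            if (flushedEntry == null) {
                flushedEntry = entry;
            }
            do {
                flushed++;
                entry = entry.next;
            } while (entry != null);
            unflushedEntry = null;
        }
    }
}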

AbstractNioChannel.AbstractNioUnsafe.flush0()

If a flush is already pending (OP_WRITE is still set on the SelectionKey), return; the event loop will call forceFlush() once the socket becomes writable again. Otherwise fall through to super.flush0():

@Override
protected final void flush0() {
    // Flush immediately only when there's no pending flush.
    // If there's a pending flush operation, event loop will call forceFlush() later,
    // and thus there's no need to call it now.
    if (isFlushPending()) {
        return;
    }
    super.flush0();
}
// A flush is "pending" when OP_WRITE is still registered on the selection key:
// the previous write could not drain the outbound buffer, so the event loop will
// call forceFlush() when the selector reports the channel as writable again.
private boolean isFlushPending() {
    SelectionKey selectionKey = selectionKey();
    return selectionKey.isValid() && (selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0;
}

AbstractChannel.AbstractUnsafe.flush0()

Mainly calls doWrite(outboundBuffer); the rest guards against re-entrance, an inactive channel, and exceptions thrown while writing.

@SuppressWarnings("deprecation")
protected void flush0() {
    if (inFlush0) {
        // Avoid re-entrance
        return;
    }

    final ChannelOutboundBuffer outboundBuffer = this.outboundBuffer;
    if (outboundBuffer == null || outboundBuffer.isEmpty()) {
        return;
    }

    inFlush0 = true;

    // Mark all pending write requests as failure if the channel is inactive.
    if (!isActive()) {
        try {
            if (isOpen()) {
                outboundBuffer.failFlushed(FLUSH0_NOT_YET_CONNECTED_EXCEPTION, true);
            } else {
                // Do not trigger channelWritabilityChanged because the channel is closed already.
                outboundBuffer.failFlushed(FLUSH0_CLOSED_CHANNEL_EXCEPTION, false);
            }
        } finally {
            inFlush0 = false;
        }
        return;
    }

    try {
        doWrite(outboundBuffer);
    } catch (Throwable t) {
        if (t instanceof IOException && config().isAutoClose()) {
            /**
             * Just call {@link #close(ChannelPromise, Throwable, boolean)} here which will take care of
             * failing all flushed messages and also ensure the actual close of the underlying transport
             * will happen before the promises are notified.
             *
             * This is needed as otherwise {@link #isActive()} , {@link #isOpen()} and {@link #isWritable()}
             * may still return {@code true} even if the channel should be closed as result of the exception.
             */
            close(voidPromise(), t, FLUSH0_CLOSED_CHANNEL_EXCEPTION, false);
        } else {
            try {
                shutdownOutput(voidPromise(), t);
            } catch (Throwable t2) {
                close(voidPromise(), t2, FLUSH0_CLOSED_CHANNEL_EXCEPTION, false);
            }
        }
    } finally {
        inFlush0 = false;
    }
}
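
config().isAutoClose() in the catch block maps to ChannelOption.AUTO_CLOSE; a hedged configuration sketch — with it set to false, an IOException from doWrite() takes the shutdownOutput() branch above instead of closing the channel:

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.socket.nio.NioSocketChannel;

public class AutoCloseExample {
    public static void main(String[] args) {
        Bootstrap b = new Bootstrap()
                .channel(NioSocketChannel.class)
                // false: a write IOException only shuts down the output side
                // (the else branch in flush0() above) instead of close()
                .option(ChannelOption.AUTO_CLOSE, false);
    }
}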

doWrite()

Does the actual write. Picks a write path based on nioBufferCnt: 0 falls back to the parent's normal write (non-ByteBuf messages such as FileRegion), 1 uses a single non-gathering write, otherwise a gathering write.
After writing it calls ChannelOutboundBuffer.removeBytes(writtenBytes).

@Override
protected void doWrite(ChannelOutboundBuffer in) throws Exception {
	for (;;) {
	    int size = in.size();
	    if (size == 0) {
	        // All written so clear OP_WRITE
	        clearOpWrite();
	        break;
	    }
	    long writtenBytes = 0;
	    boolean done = false;
	    boolean setOpWrite = false;
	
	    // Ensure the pending writes are made of ByteBufs only.
	    ByteBuffer[] nioBuffers = in.nioBuffers();
	    int nioBufferCnt = in.nioBufferCount();
	    long expectedWrittenBytes = in.nioBufferSize();
	    SocketChannel ch = javaChannel();
	
	    // Always use nioBuffers() to workaround data-corruption.
	    // See https://github.com/netty/netty/issues/2761
	    switch (nioBufferCnt) {
	        case 0:
	            // We have something else beside ByteBuffers to write so fallback to normal writes.
	            super.doWrite(in);
	            return;
	        case 1:
	            // Only one ByteBuf so use non-gathering write
	            ByteBuffer nioBuffer = nioBuffers[0];
	            for (int i = config().getWriteSpinCount() - 1; i >= 0; i --) {
	                final int localWrittenBytes = ch.write(nioBuffer);
	                if (localWrittenBytes == 0) {
	                    setOpWrite = true;
	                    break;
	                }
	                expectedWrittenBytes -= localWrittenBytes;
	                writtenBytes += localWrittenBytes;
	                if (expectedWrittenBytes == 0) {
	                    done = true;
	                    break;
	                }
	            }
	            break;
	        default:
	            for (int i = config().getWriteSpinCount() - 1; i >= 0; i --) {
	                final long localWrittenBytes = ch.write(nioBuffers, 0, nioBufferCnt);
	                if (localWrittenBytes == 0) {
	                    setOpWrite = true;
	                    break;
	                }
	                expectedWrittenBytes -= localWrittenBytes;
	                writtenBytes += localWrittenBytes;
	                if (expectedWrittenBytes == 0) {
	                    done = true;
	                    break;
	                }
	            }
	            break;
	    }
	
	    // Release the fully written buffers, and update the indexes of the partially written buffer.
	    in.removeBytes(writtenBytes);
	
	    if (!done) {
	        // Did not write all buffers completely.
	        incompleteWrite(setOpWrite);
	        break;
	    }
	}
}
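
config().getWriteSpinCount() in the loops above is also a channel option; a hedged tuning sketch (the value 8 is illustrative, the default is 16):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class WriteSpinCountConfig {
    public static void main(String[] args) {
        ServerBootstrap b = new ServerBootstrap()
                .channel(NioServerSocketChannel.class)
                // caps how many ch.write(...) calls one doWrite() pass makes
                // before falling back to incompleteWrite(...)
                .childOption(ChannelOption.WRITE_SPIN_COUNT, 8);
    }
}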

ChannelOutboundBuffer.removeBytes()

Records write progress; whenever the current() buffer has been fully written it calls remove().
Finally calls clearNioBuffers().

/**
* Removes the fully written entries and update the reader index of the partially written entry.
* This operation assumes all messages in this buffer are {@link ByteBuf}s.
*/
public void removeBytes(long writtenBytes) {
	for (;;) {
	    Object msg = current();
	    if (!(msg instanceof ByteBuf)) {
	        assert writtenBytes == 0;
	        break;
	    }
	
	    final ByteBuf buf = (ByteBuf) msg;
	    final int readerIndex = buf.readerIndex();
	    final int readableBytes = buf.writerIndex() - readerIndex;
	
	    if (readableBytes <= writtenBytes) {
	        if (writtenBytes != 0) {
	            progress(readableBytes);
	            writtenBytes -= readableBytes;
	        }
	        remove();
	    } else { // readableBytes > writtenBytes
	        if (writtenBytes != 0) {
	            buf.readerIndex(readerIndex + (int) writtenBytes);
	            progress(writtenBytes);
	        }
	        break;
	    }
	}
	clearNioBuffers();
}
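
A minimal standalone sketch of the readerIndex/writerIndex arithmetic removeBytes() relies on, simulating a partial write of 4 out of 10 readable bytes:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class PartialWriteAccounting {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16).writeBytes(new byte[10]); // 10 readable bytes
        long writtenBytes = 4;                                      // pretend the socket accepted 4

        int readerIndex = buf.readerIndex();                        // 0
        int readableBytes = buf.writerIndex() - readerIndex;        // 10

        if (readableBytes > writtenBytes) {
            // Partially written: advance the reader index, keep the entry queued.
            buf.readerIndex(readerIndex + (int) writtenBytes);
        }
        System.out.println(buf.readableBytes());                    // prints 6
        buf.release();
    }
}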

remove()

Calls removeEntry(e); if the entry was not cancelled it releases the message, completes the promise and decrements the pending outbound byte count.

/**
* Will remove the current message, mark its {@link ChannelPromise} as success and return {@code true}. If no
* flushed message exists at the time this method is called it will return {@code false} to signal that no more
* messages are ready to be handled.
*/
public boolean remove() {
	Entry e = flushedEntry;
	if (e == null) {
	    clearNioBuffers();
	    return false;
	}
	Object msg = e.msg;
	
	ChannelPromise promise = e.promise;
	int size = e.pendingSize;
	
	removeEntry(e);
	
	if (!e.cancelled) {
	    // only release message, notify and decrement if it was not canceled before.
	    ReferenceCountUtil.safeRelease(msg);
	    safeSuccess(promise); // completes the write promise (listener callbacks fire here)
	    decrementPendingOutboundBytes(size, false, true); // may drop below the low water mark and mark the channel writable again
	}
	
	// recycle the entry so it can be reused
	e.recycle();
	
	return true;
}
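
safeSuccess(promise) is what completes the ChannelFuture returned by write(); a hedged sketch of observing that completion from a handler (handler name is illustrative):

import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

public class WriteCompletionHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        // The listener fires when remove() calls safeSuccess(promise), i.e. after
        // the bytes were fully written to the socket, or when the write fails.
        ctx.writeAndFlush(msg).addListener((ChannelFutureListener) future -> {
            if (!future.isSuccess()) {
                future.cause().printStackTrace();
            }
        });
    }
}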

removeEntry()

Decrements flushed. When it reaches 0 everything has been processed: flushedEntry is set to null (and tailEntry/unflushedEntry are cleared too if this was the last entry). Otherwise flushedEntry advances to e.next.

private void removeEntry(Entry e) {
        if (-- flushed == 0) {
            // processed everything
            flushedEntry = null;
            if (e == tailEntry) {
                tailEntry = null;
                unflushedEntry = null;
            }
        } else {
            flushedEntry = e.next;
        }
    }
// if the pending bytes drop below the low water mark, mark the channel writable again
private void decrementPendingOutboundBytes(long size, boolean invokeLater, boolean notifyWritability) {
	if (size == 0) {
	    return;
	}
	
	long newWriteBufferSize = TOTAL_PENDING_SIZE_UPDATER.addAndGet(this, -size);
	if (notifyWritability && newWriteBufferSize < channel.config().getWriteBufferLowWaterMark()) {
	    setWritable(invokeLater);
	}
}


private void setWritable(boolean invokeLater) {
	for (;;) {
	    final int oldValue = unwritable;
	    final int newValue = oldValue & ~1;
	    if (UNWRITABLE_UPDATER.compareAndSet(this, oldValue, newValue)) {
	        if (oldValue != 0 && newValue == 0) {
	            fireChannelWritabilityChanged(invokeLater);
	        }
	        break;
	    }
	}
}        
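
decrementPendingOutboundBytes()/setWritable() are driven by the write-buffer water marks; a hedged sketch (the 32 KiB / 64 KiB values are illustrative) of configuring them and reacting to the writability change they fire:

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelOption;
import io.netty.channel.WriteBufferWaterMark;

public class BackpressureExample {
    // unwritable above 64 KiB pending bytes, writable again below 32 KiB
    static final WriteBufferWaterMark MARK = new WriteBufferWaterMark(32 * 1024, 64 * 1024);

    static void configure(ServerBootstrap b) {
        b.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, MARK);
    }

    static class ThrottlingHandler extends ChannelInboundHandlerAdapter {
        @Override
        public void channelWritabilityChanged(ChannelHandlerContext ctx) {
            // Fired by fireChannelWritabilityChanged(...) when the pending byte
            // count crosses a water mark; stop producing while unwritable.
            if (ctx.channel().isWritable()) {
                // resume writing
            }
            ctx.fireChannelWritabilityChanged();
        }
    }
}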

clearNioBuffers()

Nulls out the entries of the thread-local ByteBuffer array so the referenced ByteBuffers can be GC'ed.

 // Clear all ByteBuffer from the array so these can be GC'ed.
    // See https://github.com/netty/netty/issues/3837
    private void clearNioBuffers() {
        int count = nioBufferCount;
        if (count > 0) {
            nioBufferCount = 0;
            Arrays.fill(NIO_BUFFERS.get(), 0, count, null);
        }
    }
  private static final FastThreadLocal<ByteBuffer[]> NIO_BUFFERS = new FastThreadLocal<ByteBuffer[]>() {
        @Override
        protected ByteBuffer[] initialValue() throws Exception {
            return new ByteBuffer[1024];
        }
    };

AbstractNioByteChannel.incompleteWrite()

When the write stalls (the socket write returned 0 bytes because the send buffer is full), set OP_WRITE so the selector triggers another flush as soon as the channel becomes writable; otherwise schedule another flush task on the event loop so other tasks can run in between.

protected final void incompleteWrite(boolean setOpWrite) {
	// Did not write completely.
	if (setOpWrite) {
	    setOpWrite();
	} else {
	    // Schedule flush again later so other tasks can be picked up in the meantime
	    Runnable flushTask = this.flushTask;
	    if (flushTask == null) {
	        flushTask = this.flushTask = new Runnable() {
	            @Override
	            public void run() {
	                flush();
	            }
	        };
	    }
	    eventLoop().execute(flushTask);
	}
}

setOpWrite()

  protected final void setOpWrite() {
        final SelectionKey key = selectionKey();
        // Check first if the key is still valid as it may be canceled as part of the deregistration
        // from the EventLoop
        // See https://github.com/netty/netty/issues/2104
        if (!key.isValid()) {
            return;
        }
        final int interestOps = key.interestOps();
        if ((interestOps & SelectionKey.OP_WRITE) == 0) {
            key.interestOps(interestOps | SelectionKey.OP_WRITE);
        }
    }

clearOpWrite()

protected final void clearOpWrite() {
	final SelectionKey key = selectionKey();
	// Check first if the key is still valid as it may be canceled as part of the deregistration
	// from the EventLoop
	// See https://github.com/netty/netty/issues/2104
	if (!key.isValid()) {
	    return;
	}
	final int interestOps = key.interestOps();
	if ((interestOps & SelectionKey.OP_WRITE) != 0) {
	    key.interestOps(interestOps & ~SelectionKey.OP_WRITE);
	}
}
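
A standalone java.nio sketch (not Netty code) of the same OP_WRITE dance that setOpWrite()/clearOpWrite() implement: register interest when write() returns 0, clear it once the pending data drains so the selector does not spin:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;

public class OpWriteSketch {
    static void writeOrWait(SelectionKey key, ByteBuffer pending) throws IOException {
        SocketChannel ch = (SocketChannel) key.channel();
        while (pending.hasRemaining()) {
            if (ch.write(pending) == 0) {
                // Socket send buffer full: ask the selector to wake us up when writable.
                key.interestOps(key.interestOps() | SelectionKey.OP_WRITE);
                return;
            }
        }
        // Everything written: stop watching OP_WRITE, or the selector will spin.
        key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE);
    }
}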