Shared File System Master/Slave: Full Configuration and Test

This article walks through configuring two ActiveMQ brokers as a Shared File System Master/Slave pair and verifies that, when the master goes down, the slave takes over message processing seamlessly.


The test runs two brokers on the same machine against a shared file store.
The complete configuration files for the two brokers are shown below.

  
<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License.  You may obtain a copy of the License at
   
    http://www.apache.org/licenses/LICENSE-2.0
   
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<!--  
    Create a dynamic network of brokers
    For more information, see:
    
    http://activemq.apache.org/networks-of-brokers.html
    
    To run this example network of ActiveMQ brokers run
    
    $ bin/activemq console xbean:conf/activemq-dynamic-network-broker1.xml
    
    and
    
    $ bin/activemq console xbean:conf/activemq-dynamic-network-broker2.xml
    
    in separate consoles
 -->
<beans
  xmlns="http://www.springframework.org/schema/beans"
  xmlns:amq="http://activemq.apache.org/schema/core"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
  http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd   
  http://activemq.apache.org/camel/schema/spring http://activemq.apache.org/camel/schema/spring/camel-spring.xsd">


    <bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer"/>


    <broker xmlns="http://activemq.apache.org/schema/core" brokerName="dynamic-broker2" dataDirectory="${activemq.base}/data" >


        <!-- Destination specific policies using destination names or wildcards -->
        <destinationPolicy>
            <policyMap>
                <policyEntries>
                    <policyEntry queue=">" producerFlowControl="true" memoryLimit="20mb">
                        <deadLetterStrategy>
                          <individualDeadLetterStrategy queuePrefix="DLQ." useQueueForQueueMessages="true" />
                        </deadLetterStrategy>
                    </policyEntry>
                    <policyEntry topic=">" producerFlowControl="true" memoryLimit="20mb">
                    </policyEntry>
                </policyEntries>
            </policyMap>
        </destinationPolicy>


        <!-- Use the following to configure how ActiveMQ is exposed in JMX -->
        <managementContext>
            <managementContext createConnector="true" connectorPort="1100"/>
        </managementContext>


        <!--
            Configure network connector to use multicast protocol
            For more information, see
            
            http://activemq.apache.org/multicast-transport-reference.html
        -->
        <networkConnectors>
          <networkConnector uri="multicast://default"
            dynamicOnly="true" 
            networkTTL="3" 
            prefetchSize="1" 
            decreaseNetworkConsumerPriority="true" />
        </networkConnectors>
        
	
	    <persistenceAdapter>
	        <!--<kahaDB directory="${activemq.base}/data/dynamic-broker2/kahadb" />-->
	        <kahaDB directory="D:/"/> 
	    </persistenceAdapter>
	    
	    <!--  The maximum amount of space the broker will use before slowing down producers -->
	    <systemUsage>
	        <systemUsage>
	            <memoryUsage>
	                <memoryUsage limit="20 mb"/>
	            </memoryUsage>
	            <storeUsage>
	                <storeUsage limit="1 gb" name="foo"/>
	            </storeUsage>
	            <tempUsage>
	                <tempUsage limit="100 mb"/>
	            </tempUsage>
	        </systemUsage>
	    </systemUsage>


        <!-- 
            The transport connectors ActiveMQ will listen to
            Configure discovery URI to use multicast protocol
        -->
        <transportConnectors>
            <transportConnector name="openwire" uri="tcp://0.0.0.0:61618" discoveryUri="multicast://default" />
        </transportConnectors>


    </broker>


</beans>

The other broker's configuration:


<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License.  You may obtain a copy of the License at
   
    http://www.apache.org/licenses/LICENSE-2.0
   
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<!--  
    Create a dynamic network of brokers
    For more information, see:
    
    http://activemq.apache.org/networks-of-brokers.html
    
    To run this example network of ActiveMQ brokers run
    
    $ bin/activemq console xbean:conf/activemq-dynamic-network-broker1.xml
    
    and
    
    $ bin/activemq console xbean:conf/activemq-dynamic-network-broker2.xml
    
    in separate consoles
 -->
<beans
  xmlns="http://www.springframework.org/schema/beans"
  xmlns:amq="http://activemq.apache.org/schema/core"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
  http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">


    <bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer"/>


    <broker xmlns="http://activemq.apache.org/schema/core" brokerName="dynamic-broker1" dataDirectory="${activemq.base}/data" >


        <!-- Destination specific policies using destination names or wildcards -->
        <destinationPolicy>
            <policyMap>
                <policyEntries>
                    <policyEntry queue=">" producerFlowControl="true" memoryLimit="20mb">
                        <deadLetterStrategy>
                          <individualDeadLetterStrategy queuePrefix="DLQ." useQueueForQueueMessages="true" />
                        </deadLetterStrategy>
                    </policyEntry>
                    <policyEntry topic=">" producerFlowControl="true" memoryLimit="20mb">
                    </policyEntry>
                </policyEntries>
            </policyMap>
        </destinationPolicy>


        <!-- Use the following to configure how ActiveMQ is exposed in JMX -->
        <managementContext>
            <managementContext createConnector="true"/>
        </managementContext>


        <!--
            Configure network connector to use multicast protocol
            For more information, see
            
            http://activemq.apache.org/multicast-transport-reference.html
        -->
	    <networkConnectors>
	      <networkConnector uri="multicast://default"
	        dynamicOnly="true" 
	        networkTTL="3" 
	        prefetchSize="1" 
	        decreaseNetworkConsumerPriority="true" />
	    </networkConnectors>
	    
        <persistenceAdapter>
            <!--<kahaDB directory="${activemq.base}/data/dynamic-broker1/kahadb"/>-->
            <kahaDB directory="D:/"/> 
        </persistenceAdapter>
        
        <!--  The maximum amount of space the broker will use before slowing down producers -->
        <systemUsage>
            <systemUsage>
                <memoryUsage>
                    <memoryUsage limit="20 mb"/>
                </memoryUsage>
                <storeUsage>
                    <storeUsage limit="1 gb" name="foo"/>
                </storeUsage>
                <tempUsage>
                    <tempUsage limit="100 mb"/>
                </tempUsage>
            </systemUsage>
        </systemUsage>


	    <!-- 
            The transport connectors ActiveMQ will listen to
            Configure discovery URI to use multicast protocol
        -->
        <transportConnectors>
            <transportConnector name="openwire" uri="tcp://0.0.0.0:61616" discoveryUri="multicast://default" />
        </transportConnectors>
        


    </broker>


</beans>

The key point in the two configuration files above is that both brokers persist to the same directory: <kahaDB directory="D:/"/>.

Start the two brokers separately, as shown below.
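
Following the startup commands quoted in the XML comments above (the config file names are the ones from the bundled example; substitute whatever your two files are actually called), run each broker in its own console:

$ bin/activemq console xbean:conf/activemq-dynamic-network-broker1.xml
$ bin/activemq console xbean:conf/activemq-dynamic-network-broker2.xml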

Clients connect using: private static final String URL = "failover:(tcp://localhost:61616,tcp://localhost:61618)"; — that is, in this test the broker on port 61616 is initially the Master and the one on 61618 the Slave.
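
A minimal sketch of how a client can use that failover URL, assuming the standard ActiveMQ 5.x Java client and the queue name TOOL.DEFAULT.SJ taken from the test logs below; the class name FailoverDemo is made up for illustration:

import java.util.Date;

import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.MessageConsumer;
import javax.jms.MessageProducer;
import javax.jms.Queue;
import javax.jms.Session;
import javax.jms.TextMessage;

import org.apache.activemq.ActiveMQConnectionFactory;

public class FailoverDemo {
    // The failover transport reconnects transparently: if the master on 61616
    // dies, the client fails over to 61618 on its own.
    private static final String URL =
            "failover:(tcp://localhost:61616,tcp://localhost:61618)";

    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ActiveMQConnectionFactory(URL);
        Connection connection = factory.createConnection();
        connection.start();
        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Queue queue = session.createQueue("TOOL.DEFAULT.SJ");

        // Send one message to whichever broker currently holds the store lock.
        MessageProducer producer = session.createProducer(queue);
        producer.send(session.createTextMessage("Message: 0 sent at: " + new Date()));

        // Receive it back; during a failover, receive() simply blocks until the
        // transport has reconnected to the surviving broker.
        MessageConsumer consumer = session.createConsumer(queue);
        TextMessage received = (TextMessage) consumer.receive(5000);
        if (received != null) {
            System.out.println("Received: '" + received.getText() + "'");
        }

        connection.close();
    }
}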

1. Verify that messages can be sent and received normally.

Start the producer; its log output:

[Thread-1] Sending message: 'Message: 0 sent at: Wed Oct 30 09:46:42 CST 2013  ...'
[Thread-1] Sending message: 'Message: 1 sent at: Wed Oct 30 09:46:52 CST 2013  ...'
[Thread-1] Sending message: 'Message: 2 sent at: Wed Oct 30 09:47:02 CST 2013  ...'
[Thread-1] Sending message: 'Message: 3 sent at: Wed Oct 30 09:47:12 CST 2013  ...'
[Thread-1] Sending message: 'Message: 4 sent at: Wed Oct 30 09:47:22 CST 2013  ...'
[Thread-1] Sending message: 'Message: 5 sent at: Wed Oct 30 09:47:32 CST 2013  ...'

Start the consumer; its log output:

[Thread-1] Received: 'Message: 0 sent at: Wed Oct 30 09:46:42 CST 2013  ...' (length 255)
[Thread-1] Received: 'Message: 1 sent at: Wed Oct 30 09:46:52 CST 2013  ...' (length 255)
[Thread-1] Received: 'Message: 2 sent at: Wed Oct 30 09:47:02 CST 2013  ...' (length 255)
[Thread-1] Received: 'Message: 3 sent at: Wed Oct 30 09:47:12 CST 2013  ...' (length 255)
[Thread-1] Received: 'Message: 4 sent at: Wed Oct 30 09:47:22 CST 2013  ...' (length 255)
[Thread-1] Received: 'Message: 5 sent at: Wed Oct 30 09:47:32 CST 2013  ...' (length 255)
This confirms that sending and receiving work correctly.

2. Verify that when the Master goes down, the Slave takes over message processing and no messages are lost.

First stop the consumer and note the sequence number of the last message it received (18 here; when the consumer restarts, we check whether it resumes from the next number). The producer keeps sending the whole time.

Then stop the master on port 61616 and restart the consumer: it resumes receiving from message 19.

Connecting to URL: failover:(tcp://localhost:61616,tcp://localhost:61618)
Consuming queue: TOOL.DEFAULT.SJ
Using a non-durable subscription
Running 1 parallel threads
log4j:WARN No appenders could be found for logger (org.apache.activemq.transport.failover.FailoverTransport).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
[Thread-1] Received: 'Message: 19 sent at: Wed Oct 30 09:49:52 CST 2013 ...' (length 255)
This shows that once the master goes down, the slave carries on processing messages without any manual intervention.


