Deploying Common Middleware: A Summary

1. Starting the RabbitMQ Service

1.1 Starting MQ with Docker

# latest RabbitMQ 3.10
docker run -it --rm --name rabbitmq -p 5672:5672 -p 15672:15672 rabbitmq:3.10-management

1.2 Logging in to the UI

http://127.0.0.1:15672/
Username: guest
Password: guest
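
To confirm the broker itself is reachable (not just the UI), you can ping the node from inside the container or call the management HTTP API. A quick sanity check, assuming the container name and default guest credentials from above:

# ping the broker from inside the container
docker exec rabbitmq rabbitmq-diagnostics ping
# or query the management HTTP API
curl -u guest:guest http://127.0.0.1:15672/api/overview

Note that --rm means the container (and any queues in it) is removed as soon as you stop it; drop --rm and mount a volume if the data needs to survive restarts.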

2. Deploying ClickHouse Locally

Create a docker-compose.yml file:

version: '3'
services:
  clickhouse:
    image: clickhouse/clickhouse-server
    container_name: clickhouse-server
    ulimits:
      nofile:
        soft: "262144"
        hard: "262144"
    volumes:
      - /home/clickhouse/data:/var/lib/clickhouse/
      - /home/clickhouse/logs:/var/log/clickhouse-server/
      - ./config.d:/etc/clickhouse-server/config.d/
      - ./users.d:/etc/clickhouse-server/users.d/
      - ./docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d/
    ports:
      - 18123:8123 # for the HTTP interface
      - 19000:9000 # for the native client

Then start the container:

docker-compose up -d

Once it is up, connect to 127.0.0.1:18123 with a client tool such as DataGrip.
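
If you prefer the command line over a GUI client, you can also verify the server directly; a minimal check, assuming the container name and port mappings above:

# query the HTTP interface on the mapped host port
curl 'http://127.0.0.1:18123/?query=SELECT%20version()'
# or use the native client bundled in the server image
docker exec -it clickhouse-server clickhouse-client --query "SELECT version()"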

3. Deploying Nacos Locally

For on-premises deployments at a customer site, Nacos is a convenient and highly available choice as the microservice registry and configuration center.

3.1 Create the docker-compose.yml file

version: "3"
services:
  nacos:
    image: nacos/nacos-server:latest
    container_name: nacos
    environment:
      - PREFER_HOST_MODE=hostname
      - MODE=standalone
    ports:
      - "8848:8848"
      - "9848:9848"

After running docker-compose up -d, open http://127.0.0.1:8848/nacos and log in with the default username nacos and password nacos.
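
Besides the console, you can exercise the config center and registry through Nacos's HTTP open API. A quick smoke test against the standalone instance above (the dataId and service name are just examples; if authentication is enabled in your Nacos version you will need an access token first):

# publish a configuration item
curl -X POST 'http://127.0.0.1:8848/nacos/v1/cs/configs' -d 'dataId=demo.properties&group=DEFAULT_GROUP&content=hello'
# read it back
curl 'http://127.0.0.1:8848/nacos/v1/cs/configs?dataId=demo.properties&group=DEFAULT_GROUP'
# register a service instance
curl -X POST 'http://127.0.0.1:8848/nacos/v1/ns/instance?serviceName=demo.service&ip=127.0.0.1&port=8080'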

4. Deploying Single-Node Kafka on K8S

Since the first way of starting Kafka depends on Zookeeper, let's first deploy a single-node Zookeeper. Note that Kafka 3.x starts in KRaft mode by default and no longer needs ZK.

4.1 zookeeper.yaml

In this file we also add a livenessProbe health check.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-pod
  namespace: middleware
  labels:
    app: zookeeper
spec:
  replicas: 1
  selector:
    matchLabels:
      project: zookeeper-pod
      app: zookeeper
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  revisionHistoryLimit: 5
  template:
    metadata:
      labels:
        project: zookeeper-pod
        app: zookeeper
    spec:
      containers:
        - name: zookeeper
          image: zookeeper:3.8.1
          imagePullPolicy: Always
          livenessProbe:
            tcpSocket:
              port: 2181
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-pod
  namespace: middleware
  labels:
    project: zookeeper-pod
    app: zookeeper
spec:
  type: NodePort
  selector:
    project: zookeeper-pod
    app: zookeeper
  ports:
    - port: 2181
      targetPort: 2181
      nodePort: 32181
# Deploy Zookeeper
kubectl apply -f zookeeper.yaml
# Check the pod status
kubectl get pods -n middleware
NAME                             READY   STATUS    RESTARTS   AGE
zookeeper-pod-55cbcd55d4-cfcqm   1/1     Running   0          19m
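
Before moving on to Kafka, it is worth confirming that Zookeeper actually answers. A quick check, assuming the pod name printed above (yours will differ) and the scripts bundled in the official zookeeper image:

# the server should report "standalone" mode
kubectl exec -it zookeeper-pod-55cbcd55d4-cfcqm -n middleware -- zkServer.sh status
# or list the root znode with the CLI
kubectl exec -it zookeeper-pod-55cbcd55d4-cfcqm -n middleware -- zkCli.sh -server localhost:2181 ls /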

4.2 kafka.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka
  namespace: middleware
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  revisionHistoryLimit: 5
  selector:
    matchLabels:
      project: kafka
      app: kafka
  template:
    metadata:
      labels:
        project: kafka
        app: kafka
    spec:
      containers:
        - name: kafka
          image: bitnami/kafka:3.4.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9092
              name: web
              protocol: TCP
          env:
#            # Settings for starting with Zookeeper - begin
#            - name: KAFKA_ENABLE_KRAFT
#              value: "no"
#            - name: KAFKA_CFG_ZOOKEEPER_CONNECT
#              value: zookeeper-pod.middleware:2181
#            - name: KAFKA_CFG_ADVERTISED_LISTENERS
#              value: PLAINTEXT://:30092
#            - name: KAFKA_CFG_LISTENERS
#              value: PLAINTEXT://:30092
            # Settings for starting with Zookeeper - end
            # Start in KRaft mode (the default)
            - name: KAFKA_CFG_ADVERTISED_LISTENERS
              # requires a hosts entry mapping kafka-server to the node IP
              value: PLAINTEXT://kafka-server:30092
            #- name: KAFKA_HEAP_OPTS
            #  value: -Xmx2048m -Xms2048m
            - name: ALLOW_PLAINTEXT_LISTENER
              value: "yes"
---
apiVersion: v1
kind: Service
metadata:
  name: kafka
  namespace: middleware
  labels:
    app: kafka
spec:
  type: NodePort
  selector:
    project: kafka
    app: kafka
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30092
kubectl apply -f kafka.yaml
kubectl get pods -n middleware
NAME                             READY   STATUS    RESTARTS   AGE
kafka-7cc7786fcc-69mvp           1/1     Running   0          22s
zookeeper-pod-55cbcd55d4-cfcqm   1/1     Running   0          34m
# Delete the previously deployed redis
kubectl delete deployment redis-pod -n middleware
# List all current deployments
kubectl get deployment -n middleware
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
kafka           1/1     1            1           14m
zookeeper-pod   1/1     1            1           38m
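
To make sure the broker is reachable from outside the cluster, you can create a topic and push a test message through the NodePort. A sketch, assuming the Kafka CLI tools are available locally (for example from a Kafka download or the same bitnami image) and that your hosts file maps kafka-server to a cluster node IP, matching the advertised listener above:

# create a test topic
kafka-topics.sh --bootstrap-server kafka-server:30092 --create --topic test --partitions 1 --replication-factor 1
# produce one message ...
echo "hello kafka" | kafka-console-producer.sh --bootstrap-server kafka-server:30092 --topic test
# ... and consume it back
kafka-console-consumer.sh --bootstrap-server kafka-server:30092 --topic test --from-beginning --max-messages 1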

OK, single-node Zookeeper and Kafka are now running on our local K8S. Next we can begin our hands-on journey with Kafka.

5. Deploying Elasticsearch Locally

The deployment approaches above are generally meant for production; for learning, a quick Docker-based setup is enough. This example runs on Windows 10.

5.1 Running Elasticsearch in Docker requires setting vm.max_map_count as follows

# Windows and macOS with Docker Desktop
docker-machine ssh
sudo sysctl -w vm.max_map_count=262144
# Windows with Docker Desktop WSL 2 backend
wsl -d docker-desktop
sysctl -w vm.max_map_count=262144

5.2 Deployment with docker-compose

version: '3'
services:
  kibana:
    image: docker.elastic.co/kibana/kibana:7.11.2
    container_name: kibana711
    environment:
      - I18N_LOCALE=zh-CN
      - XPACK_GRAPH_ENABLED=true
      - TIMELION_ENABLED=true
      - XPACK_MONITORING_COLLECTION_ENABLED=true
    ports:
      - "5601:5601"
    networks:
      - es711net
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.11.2
    container_name: es711
    environment:
      - cluster.name=shenjian
      - node.name=es711
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
      - discovery.seed_hosts=es711
      - cluster.initial_master_nodes=es711
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - es73data1:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - es711net

volumes:
  es73data1:
    driver: local

networks:
  es711net:
    driver: bridge

Verify the ES node at http://127.0.0.1:9200/

{
  "name" : "es711",
  "cluster_name" : "shenjian",
  "cluster_uuid" : "UCRt6zcbTEKVJHD8nRjkQA",
  "version" : {
    "number" : "7.11.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "3e5a16cfec50876d20ea77b075070932c6464c7d",
    "build_date" : "2021-03-06T05:54:38.141101Z",
    "build_snapshot" : false,
    "lucene_version" : "8.7.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
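
Beyond this banner JSON, a quick way to confirm the node is actually usable is to index and search a test document over the REST API; a minimal example (the index name demo is arbitrary):

# check cluster health
curl 'http://127.0.0.1:9200/_cluster/health?pretty'
# index a test document
curl -X POST 'http://127.0.0.1:9200/demo/_doc' -H 'Content-Type: application/json' -d '{"msg": "hello es"}'
# search it back
curl 'http://127.0.0.1:9200/demo/_search?q=msg:hello&pretty'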

Open Kibana at http://127.0.0.1:5601/

You should see the Kibana welcome page ("欢迎使用 Elastic", shown in Chinese because I18N_LOCALE is set to zh-CN).

Feel free to follow the WeChat official account 算法小生 or Shen Jian's tech blog.
