Docker Networking
Remove all images:
[root@localhost ~]# docker rmi -f $(docker images -aq)
[root@localhost ~]# docker run -d -P --name tomcat01 tomcat
How it works
When we start a Docker container, Docker assigns it an IP address. As soon as Docker is installed, the host gains a network interface named docker0, which works in bridge mode; the underlying technology is veth-pair.
Network interfaces on the Linux host
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:8d:fc:27 brd ff:ff:ff:ff:ff:ff
inet 192.168.75.131/24 brd 192.168.75.255 scope global eno16777736
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe8d:fc27/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:6f:8a:03:88 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:6fff:fe8a:388/64 scope link
valid_lft forever preferred_lft forever
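Three interfaces matter here: lo is the loopback, eno16777736 is the host's physical NIC (192.168.75.131), and docker0 is the bridge Docker created at 172.17.0.1.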
Network interfaces inside a Docker container (this capture is from a container on a custom 172.38.0.0/16 network, so its address differs from docker0's 172.17.0.0/16 range)
/data # ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
28: eth0@if29: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
link/ether 02:42:ac:26:00:0c brd ff:ff:ff:ff:ff:ff
inet 172.38.0.12/16 brd 172.38.255.255 scope global eth0
valid_lft forever preferred_lft forever
A veth-pair is a pair of virtual interface devices that always come in twos: one end plugs into the network stack, and the two ends are wired directly to each other.
Whenever a container is started without an explicit network, its traffic is routed through docker0, and Docker assigns the container an available IP address by default.
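You can observe the pairing yourself by comparing interface indices on both sides; a minimal sketch (the alpine image, container name, and exact veth name/indices here are illustrative):
[root@localhost ~]# docker run -d --name net-test alpine sleep 3600
[root@localhost ~]# docker exec net-test ip addr    # e.g. shows 28: eth0@if29
[root@localhost ~]# ip addr | grep veth             # e.g. shows 29: vethXXXX@if28
# The @ifNN suffixes cross-reference each other: eth0 in the container and
# vethXXXX on the host are the two ends of one veth pair, and the host end
# is attached to docker0 (check with: ip link show master docker0)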
Summary
Docker uses Linux bridging: the host runs a bridge for Docker containers named docker0. All of Docker's network interfaces are virtual, and virtual interfaces forward traffic efficiently.
# Can we ping one container from another by container name?
Yes, if the target was linked at startup with --link:
[root@localhost ~]# docker run -d -P --name tomcat03 --link tomcat01 tomcat
5d2cda278437f08f5efa9fc3ef9230de3410c915e59161c69fefc017fbb34d41
[root@localhost ~]# docker exec -it tomcat03 ping tomcat01
PING tomcat01 (172.17.0.3) 56(84) bytes of data.
64 bytes from tomcat01 (172.17.0.3): icmp_seq=1 ttl=64 time=0.458 ms
64 bytes from tomcat01 (172.17.0.3): icmp_seq=2 ttl=64 time=0.072 ms
--- tomcat01 ping statistics ---
8 packets transmitted, 8 received, 0% packet loss, time 11ms
rtt min/avg/max/mdev = 0.072/0.129/0.458/0.125 ms
# The reverse direction does NOT work: tomcat01 cannot ping tomcat03 by name
Why can tomcat03 reach tomcat01 by name? Because --link added a hosts mapping: 172.17.0.3 tomcat01 4110f5e3cca5
[root@localhost ~]# docker exec -it tomcat03 cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 tomcat01 4110f5e3cca5
172.17.0.5 5d2cda278437
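Note that the mapping is one-way: tomcat03's /etc/hosts resolves tomcat01, but tomcat01's /etc/hosts was never updated with tomcat03's name, which is exactly why the reverse ping fails.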
Using --link is not recommended. Use a custom network instead of docker0: docker0 does not support connecting to containers by name.
Custom networks
List all Docker networks
[root@localhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
29cd551f4612 bridge bridge local
9fbab3974280 host host local
c8937938fbb0 none null local
Network modes (a sketch of the non-default modes follows the list):
bridge: bridged via docker0 (the default)
none: no network configured
host: share the host's network stack
container: share another container's network namespace
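A quick sketch of the non-default modes (the container names and images here are illustrative, not part of the original setup):
# host mode: the container uses the host's network stack directly, so -p is unnecessary
[root@localhost ~]# docker run -d --name web-host --net host nginx
# none mode: the container gets only a loopback interface
[root@localhost ~]# docker run --rm --net none alpine ip addr
# container mode: share the network namespace of an existing container
[root@localhost ~]# docker run --rm --net container:web-host alpine ip addr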
Test
# Starting a container without --net uses the bridge network by default;
# these two commands are equivalent:
[root@localhost ~]# docker run -d -P --name tomcat01 tomcat
[root@localhost ~]# docker run -d -P --name tomcat01 --net bridge tomcat
# Create a custom network
[root@localhost ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
baf0043eea660a93d20517c0043be784c6b1e50c947fff6b0f865c248c7a4587
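Here --driver bridge selects the default bridge driver, --subnet 192.168.0.0/16 defines the address range containers on this network can use, and --gateway 192.168.0.1 is the address of the bridge itself.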
[root@localhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
29cd551f4612 bridge bridge local
9fbab3974280 host host local
baf0043eea66 mynet bridge local
c8937938fbb0 none null local
# Inspect the network we just created
[root@localhost ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "baf0043eea660a93d20517c0043be784c6b1e50c947fff6b0f865c248c7a4587",
"Created": "2021-08-25T20:57:53.637432436-04:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]
# Launch containers attached to our own network
[root@localhost ~]# docker run -d -P --name tomcat-net01 --net mynet tomcat
8ccbfe5365bf9684827b6baf592a76d411e17466a5910818ad99fd77f65bd985
[root@localhost ~]# docker run -d -P --name tomcat-net02 --net mynet tomcat
cb01e58812d0151ec1dbbe51e968b591e5ae394bfb38b0b9d05ee19ec83b0833
[root@localhost ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "baf0043eea660a93d20517c0043be784c6b1e50c947fff6b0f865c248c7a4587",
"Created": "2021-08-25T20:57:53.637432436-04:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"8ccbfe5365bf9684827b6baf592a76d411e17466a5910818ad99fd77f65bd985": {
"Name": "tomcat-net01",
"EndpointID": "7737e7cbff1e471275b81c7796966549091756dc76557cb388bdb938567f2aa9",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
},
"cb01e58812d0151ec1dbbe51e968b591e5ae394bfb38b0b9d05ee19ec83b0833": {
"Name": "tomcat-net02",
"EndpointID": "0d1f3185d52155e83a519a366249f5db2a277b4bd887080b9cd3d952825f19d6",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
# Test: containers on a custom network can ping each other by name
[root@localhost ~]# docker exec -it tomcat-net02 ping tomcat-net01
PING tomcat-net01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat-net01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.108 ms
64 bytes from tomcat-net01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.083 ms
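Name resolution on a user-defined network is handled by Docker's embedded DNS server, which listens on 127.0.0.11 inside each container; a quick way to confirm (output abbreviated):
[root@localhost ~]# docker exec -it tomcat-net01 cat /etc/resolv.conf
# expected to contain: nameserver 127.0.0.11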
Connecting networks
How do containers on different networks communicate?
# Syntax: docker network connect <custom-network> <container-to-connect>
[root@localhost ~]# docker network connect mynet tomcat01
# After connecting, tomcat01 is also attached to the mynet network
# One container now has two IP addresses
[root@localhost ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "baf0043eea660a93d20517c0043be784c6b1e50c947fff6b0f865c248c7a4587",
"Created": "2021-08-25T20:57:53.637432436-04:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"277430061ed354fb9c0667874948535987318380a38a6f3bae05a5431fb73141": {
"Name": "tomcat01",
"EndpointID": "0bec4c03f32d516e8e8b9ab523c59e84c7333d3cb7e19308b1df069bfb2f901d",
"MacAddress": "02:42:c0:a8:00:04",
"IPv4Address": "192.168.0.4/16",
"IPv6Address": ""
},
"8ccbfe5365bf9684827b6baf592a76d411e17466a5910818ad99fd77f65bd985": {
"Name": "tomcat-net01",
"EndpointID": "7737e7cbff1e471275b81c7796966549091756dc76557cb388bdb938567f2aa9",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
},
"cb01e58812d0151ec1dbbe51e968b591e5ae394bfb38b0b9d05ee19ec83b0833": {
"Name": "tomcat-net02",
"EndpointID": "0d1f3185d52155e83a519a366249f5db2a277b4bd887080b9cd3d952825f19d6",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
# Test whether they can communicate
[root@localhost ~]# docker exec -it tomcat01 ping tomcat-net01
PING tomcat-net01 (192.168.0.2) 56(84) bytes of data.
64 bytes from tomcat-net01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.173 ms
64 bytes from tomcat-net01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.088 ms
^C
--- tomcat-net01 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.088/0.130/0.173/0.044 ms
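To confirm that tomcat01 really holds two addresses, one on bridge and one on mynet, a quick check (output abbreviated; the bridge-side address will vary):
[root@localhost ~]# docker inspect tomcat01 | grep '"IPAddress"'
# expect one 172.17.0.x entry under "bridge" and 192.168.0.4 under "mynet"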
Hands-on: a Redis cluster
# Create the network
[root@localhost ~]# docker network create redis --subnet 172.38.0.0/16
# Generate six Redis config files with a script
for port in $(seq 1 6); \
do \
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat << EOF >/mydata/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
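Each generated config announces 172.38.0.1${port} as the node's address, so node 1 answers at 172.38.0.11 through node 6 at 172.38.0.16; these must match the --ip values passed to docker run below.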
# Start a Redis container (node 1 written out in full)
docker run -p 6379:6379 -p 16379:16379 --name redis-1 -v /mydata/redis/node-1/data:/data -v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
# Start all six nodes in a loop (the single command above, templated over ${port})
for port in $(seq 1 6); \
do \
docker run -p 637${port}:6379 -p 1637${port}:16379 --name redis-${port} \
-v /mydata/redis/node-${port}/data:/data \
-v /mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.1${port} redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf; \
done
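A quick sanity check that all six nodes are up (output abbreviated):
[root@localhost ~]# docker ps --format '{{.Names}}: {{.Ports}}' | grep redis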
# Create the cluster (the /data # prompt below is a shell inside one of the node containers, e.g. via docker exec -it redis-1 sh)
/data # redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.38.0.15:6379 to 172.38.0.11:6379
Adding replica 172.38.0.16:6379 to 172.38.0.12:6379
Adding replica 172.38.0.14:6379 to 172.38.0.13:6379
M: f40a1620d6479c4bb300a3983691ef79efd1167b 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
M: f16e0a7c1ddb10e9ebf795b658ae51e899193572 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
M: c9e5c98a602829f45fa424bd48b22de519782bff 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
S: 11b4b85edf7762cb2bc5e87cf471cef0427a6a5f 172.38.0.14:6379
replicates c9e5c98a602829f45fa424bd48b22de519782bff
S: c0769bbfdc7d701710ce72ee14e791814e7b3431 172.38.0.15:6379
replicates f40a1620d6479c4bb300a3983691ef79efd1167b
S: 60142d5917635a6aa05c676cfea9c6a77062d289 172.38.0.16:6379
replicates f16e0a7c1ddb10e9ebf795b658ae51e899193572
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
...
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: f40a1620d6479c4bb300a3983691ef79efd1167b 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: c9e5c98a602829f45fa424bd48b22de519782bff 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: f16e0a7c1ddb10e9ebf795b658ae51e899193572 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 60142d5917635a6aa05c676cfea9c6a77062d289 172.38.0.16:6379
slots: (0 slots) slave
replicates f16e0a7c1ddb10e9ebf795b658ae51e899193572
S: 11b4b85edf7762cb2bc5e87cf471cef0427a6a5f 172.38.0.14:6379
slots: (0 slots) slave
replicates c9e5c98a602829f45fa424bd48b22de519782bff
S: c0769bbfdc7d701710ce72ee14e791814e7b3431 172.38.0.15:6379
slots: (0 slots) slave
replicates f40a1620d6479c4bb300a3983691ef79efd1167b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
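To exercise the cluster, connect with redis-cli in cluster mode (-c) from inside any node container; a minimal sketch:
/data # redis-cli -c
127.0.0.1:6379> cluster info    # expect cluster_state:ok and cluster_known_nodes:6
127.0.0.1:6379> set k1 v1       # with -c the client follows MOVED redirects automatically
127.0.0.1:6379> get k1
# Stopping a master (e.g. docker stop redis-3) and re-running cluster info
# should show its replica taking over, which is the point of --cluster-replicas 1.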