通过ip netns和ip link模拟虚拟网卡
ip netns
#帮助信息
[root@docker ~]# ip netns help
Usage: ip netns list
ip netns add NAME
ip netns set NAME NETNSID
ip [-all] netns delete [NAME]
ip netns identify [PID]
ip netns pids NAME
ip [-all] netns exec [NAME] cmd ...
ip netns monitor
ip netns list-id
#添加两个网络名称空间
[root@docker ~]# ip netns add ns1
[root@docker ~]# ip netns add ns2
#查看网络名称空间列表
[root@docker ~]# ip netns list
ns2
ns1
#在名称空间中执行命令,默认只有lo设备
[root@docker ~]# ip netns exec ns1 ifconfig -a
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ip link
#创建一对虚拟网卡
[root@docker ~]# ip link add name veth1.1 type veth peer name veth1.2
[root@docker ~]# ip link sh
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:76:d2:d2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
link/ether 02:42:dc:f6:13:ef brd ff:ff:ff:ff:ff:ff
4: veth1.2@veth1.1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether ca:99:86:14:1d:1c brd ff:ff:ff:ff:ff:ff
5: veth1.1@veth1.2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether b6:87:2c:19:84:4b brd ff:ff:ff:ff:ff:ff
#将veth1.2移入至ns1名称空间中,查看目前主机上只剩下veth1.1
[root@docker ~]# ip link set dev veth1.2 netns ns1
[root@docker ~]# ip link show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:76:d2:d2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
link/ether 02:42:dc:f6:13:ef brd ff:ff:ff:ff:ff:ff
5: veth1.1@if4: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether b6:87:2c:19:84:4b brd ff:ff:ff:ff:ff:ff link-netnsid 0
#查看ns1名称空间下的网卡信息
[root@docker ~]# ip netns list
ns2
ns1 (id: 0)
[root@docker ~]# ip netns exec ns1 ifconfig -a
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
veth1.2: flags=4098<BROADCAST,MULTICAST> mtu 1500
ether ca:99:86:14:1d:1c txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
#激活网卡设备
[root@docker ~]# ifconfig veth1.1 10.1.0.1/24 up
[root@docker ~]# ifconfig
... ...
veth1.1: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.1.0.1 netmask 255.255.255.0 broadcast 10.1.0.255
ether b6:87:2c:19:84:4b txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
#激活ns1名称空间中的veth1.2网卡
[root@docker ~]# ip netns exec ns1 ifconfig veth1.2 10.1.0.2/24 up
[root@docker ~]# ip netns exec ns1 ip addr
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
4: veth1.2@if5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 22:f2:64:f7:5c:aa brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.1.0.2/24 brd 10.1.0.255 scope global veth1.2
valid_lft forever preferred_lft forever
inet6 fe80::20f2:64ff:fef7:5caa/64 scope link
valid_lft forever preferred_lft forever
#测试网络是否连通
[root@docker ~]# ip netns exec ns1 ping 10.1.0.1
PING 10.1.0.1 (10.1.0.1) 56(84) bytes of data.
64 bytes from 10.1.0.1: icmp_seq=1 ttl=64 time=0.098 ms
64 bytes from 10.1.0.1: icmp_seq=2 ttl=64 time=0.027 ms
64 bytes from 10.1.0.1: icmp_seq=3 ttl=64 time=0.036 ms
^C
--- 10.1.0.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2004ms
rtt min/avg/max/mdev = 0.027/0.053/0.098/0.032 ms
docker网络类型
- none:无网络模式
- bridge:默认模式,相当于NAT
- host:共用宿主机Network NameSpace
- container:与其他容器共用Network NameSpace
#查看docker支持的网络类型
[root@docker2 log]# docker network ls
NETWORK ID NAME DRIVER SCOPE
bb626c847d66 bridge bridge local
ed8e2d349c13 host host local
ecc99e836ca1 none null local
#查看docker网络基础信息
[root@docker ~]# docker network inspect bridge
[
{
"Name": "bridge",
"Id": "7c163424f65afc88a25a5c9d9aaa81015575497faa486e7e30f4ac9b6667b42e",
"Created": "2021-10-25T20:02:30.359045381-04:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"0622a1a96e7dfbd2adb675c51566e56a2d499224fac6803db041b57bad1aa89b": {
"Name": "silly_brahmagupta",
"EndpointID": "667007511dbc197e4ab2d66b0bdd6cca3bd50de9847e893f91b0a384a80be0ab",
"MacAddress": "02:42:ac:11:00:03",
"IPv4Address": "172.17.0.3/16",
"IPv6Address": ""
},
"3b80ac9ed2c4010042def5eb29423f8bc887978095831fb61f1faee5c493bfce": {
"Name": "distracted_sammet",
"EndpointID": "ffcb0d31c84e3b49cd47e959244e17cb894468592674885399151a17e48ed9fb",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]
bridge模式(默认docker网络模式)
#使用--network bridge来设置网络类型为bridge
[root@docker ~]# docker container run -it --rm --network bridge busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:172.17.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:7 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:578 (578.0 B) TX bytes:0 (0.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
#不使用--network启动busybox容器同使用--network bridge
#注:使用--rm参数在容器退出时,容器会自动删除
[root@docker ~]# docker container run -it --rm busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:172.17.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:6 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:508 (508.0 B) TX bytes:0 (0.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
联盟式容器(Joined containers)
联盟式容器是指使用某个已存在容器的网络接口的容器,接口被联盟内的各容器共享使用,因此,联盟式容器彼此间完全无隔离,彼此共享同一个网络名称空间,其它名称空间还是隔离的,彼此之间存在端口冲突的可能性,通常只会在多个容器上的程序需要loopback接口互相通信、或对某已存在的容器网络属性进行监控时才使用此种模式的网络模型。
#启动第一个容器
[root@docker ~]# docker run --name b1 -it --rm busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:172.17.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:6 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:508 (508.0 B) TX bytes:0 (0.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
#启动第二个容器
[root@docker ~]# docker run --name b2 -it --rm busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:03
inet addr:172.17.0.3 Bcast:172.17.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:7 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:578 (578.0 B) TX bytes:0 (0.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
#共享b1的网络名称空间,启动第二个容器
[root@docker ~]# docker run --name b2 --network container:b1 -it --rm busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:172.17.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:8 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:648 (648.0 B) TX bytes:0 (0.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
#在b1容器中启动httpd服务
/ # echo 'busybox--b1' >/tmp/index.html
/ # httpd -h /tmp/
/ # netstat -tnlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 :::80 :::* LISTEN 11/httpd
#在b2容器中查看,同样httpd服务已启动
/ # netstat -tnlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 :::80 :::* LISTEN -
/ # wget -O - -q 127.0.0.1
busybox--b1
host模式启动容器
#通过--network host指定容器共享宿主机的网络名称空间
[root@docker ~]# docker container run --name b2 --network host -it --rm busybox
/ # ifconfig
docker0 Link encap:Ethernet HWaddr 02:42:88:EA:AE:31
inet addr:172.17.0.1 Bcast:172.17.255.255 Mask:255.255.0.0
inet6 addr: fe80::42:88ff:feea:ae31/64 Scope:Link
UP BROADCAST MULTICAST MTU:1500 Metric:1
RX packets:72 errors:0 dropped:0 overruns:0 frame:0
TX packets:81 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:5369 (5.2 KiB) TX bytes:12075 (11.7 KiB)
ens33 Link encap:Ethernet HWaddr 00:0C:29:76:D2:D2
inet addr:192.168.88.101 Bcast:192.168.88.255 Mask:255.255.255.0
inet6 addr: fe80::a03d:1c5b:d3d8:111c/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:11927 errors:0 dropped:0 overruns:0 frame:0
TX packets:5794 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:4468318 (4.2 MiB) TX bytes:717655 (700.8 KiB)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:41 errors:0 dropped:0 overruns:0 frame:0
TX packets:41 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:6415 (6.2 KiB) TX bytes:6415 (6.2 KiB)
veth1.1 Link encap:Ethernet HWaddr 96:3C:C6:3F:E8:DE
inet addr:10.1.0.1 Bcast:10.1.0.255 Mask:255.255.255.0
inet6 addr: fe80::943c:c6ff:fe3f:e8de/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:13 errors:0 dropped:0 overruns:0 frame:0
TX packets:13 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1026 (1.0 KiB) TX bytes:1026 (1.0 KiB)
/ # echo 'b1-container'>/tmp/index.html
#在容器中启动httpd服务,因为容器与宿主机共享网络名称空间,访问宿主机的IP地址,效果如下图
/ # httpd -h /tmp/
/ # netstat -tnlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN -
tcp 0 0 :::80 :::* LISTEN 10/httpd
tcp 0 0 :::21 :::* LISTEN -
tcp 0 0 :::22 :::* LISTEN -
tcp 0 0 ::1:25 :::* LISTEN -
自定义docker0桥的网络属性
#自定义docker0桥的网络属性,在/etc/docker/daemon.json配置文件添加以下信息
#docker0桥的IP地址,bip即bridge ip
"bip":"192.168.1.1/24",
"fixed-cidr":"10.20.0.0/16",
"fixed-cidr-v6":"2001:dc8::/64",
"mtu":1500,
#设置默认网关
"default-gateway":"10.20.1.1",
"default-gateway-v6":"2001:dc8:acde::98",
#不使用宿主机的dns设置,设置容器dns服务器
"dns":["114.114.114.114","8.8.8.8"]
[root@docker ~]# vim /etc/docker/daemon.json
"bip":"10.0.0.1/16"
[root@docker ~]# systemctl start docker
[root@docker ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.0.0.1 netmask 255.255.0.0 broadcast 10.0.255.255
inet6 fe80::42:88ff:feea:ae31 prefixlen 64 scopeid 0x20<link>
ether 02:42:88:ea:ae:31 txqueuelen 0 (Ethernet)
RX packets 72 bytes 5369 (5.2 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 81 bytes 12075 (11.7 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
开放docker的远程访问,使其它机器可以访问
#docker默认仅监听Unix Socket格式的地址,/var/run/docker.sock,如使用TCP套接字,需修改/etc/docker/daemon.json,也可向dockerd直接传递-H|--host选项
"hosts":["tcp://0.0.0.0:2375","unix:///var/run/docker.sock"]
#重新启动docker服务后,查看监听端口--2375
[root@docker ~]# netstat -tnlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1163/sshd
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 1363/master
tcp6 0 0 :::2375 :::* LISTEN 1513/dockerd
tcp6 0 0 :::21 :::* LISTEN 1193/vsftpd
tcp6 0 0 :::22 :::* LISTEN 1163/sshd
tcp6 0 0 ::1:25 :::* LISTEN 1363/master
#查看docker2服务器上的docker镜像列表
[root@docker2 ~]# docker image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
#通过IP:Port连接docker1查看镜像列表
[root@docker2 ~]# docker -H 192.168.88.101:2375 image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
tye/busybox v1.0 0cba66781b54 47 hours ago 1.24MB
<none> <none> 58dad6d6bba9 47 hours ago 1.24MB
ubuntu latest ba6acccedd29 11 days ago 72.8MB
centos 6.10 f1af727749c4 5 weeks ago 194MB
busybox latest 16ea53ea7c65 6 weeks ago 1.24MB
redis 4-alpine e3dd0e49bca5 18 months ago 20.4MB
nginx 1.14-alpine 8a2fb25a19f5 2 years ago 16MB
nginx 1.14 295c7be07902 2 years ago 109MB
#通过IP:Port连接docker2查看镜像列表
[root@docker ~]# docker -H 192.168.88.102:2375 image ls
REPOSITORY TAG IMAGE ID CREATED SIZE
创建docker网络
#创建bridge网络
[root@docker2 ~]# docker network create --driver bridge --subnet "172.26.0.0/16" --gateway "172.26.0.1" mybr1
2cd06278691f22fa8c8740919615c622426488ff21366b4c9a8ca19ebafa177b
[root@docker2 ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
f70e93784ea5 bridge bridge local
ed8e2d349c13 host host local
2cd06278691f mybr1 bridge local
ecc99e836ca1 none null local
#查看网卡信息
[root@docker2 ~]# ifconfig
br-2cd06278691f: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.26.0.1 netmask 255.255.0.0 broadcast 172.26.255.255
ether 02:42:d0:9c:8c:ef txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
#修改网络名称(修改时必须先down掉网卡,否则会出现以下提示)
[root@docker2 ~]# ip link set dev br-2cd06278691f name docker1
RTNETLINK answers: Device or resource busy
#启动容器加入mybr1网络
[root@docker2 ~]# docker container run -it --network mybr1 busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:1A:00:02
inet addr:172.26.0.2 Bcast:172.26.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:11 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:946 (946.0 B) TX bytes:0 (0.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
#不指定网络,默认使用docker0网络
[root@docker2 ~]# docker container run -it busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:0A:00:00:02
inet addr:10.0.0.2 Bcast:10.0.255.255 Mask:255.255.0.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:11 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:946 (946.0 B) TX bytes:0 (0.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
docker跨主机网络
- macvlan
- overlay
macvlan实现
#在两台宿主机上分别执行以下语句,实现macvlan的创建
[root@docker1 ~]# docker network create --driver macvlan --subnet=10.0.0.0/24 --gateway=10.0.0.254 -o parent=ens33 macvlan_1
3115b61496f5d2dbf2729bf61676ba57528a2abecf1b7fb00269614a9545e02d
#docker1启动容器
[root@docker1 ~]# docker container run -it --network macvlan_1 --ip 10.0.0.15 centos:6.10
#docker2启动容器,并使用--ip指定IP
[root@docker2 ~]# docker container run -it --network macvlan_1 --ip 10.0.0.16 centos:6.10
#在docker1中的容器ping一下docker2中的容器
[root@d93934406ed2 /]# ping 10.0.0.16
PING 10.0.0.16 (10.0.0.16) 56(84) bytes of data.
64 bytes from 10.0.0.16: icmp_seq=1 ttl=64 time=0.745 ms
64 bytes from 10.0.0.16: icmp_seq=2 ttl=64 time=0.349 ms
64 bytes from 10.0.0.16: icmp_seq=3 ttl=64 time=0.306 ms
64 bytes from 10.0.0.16: icmp_seq=4 ttl=64 time=0.322 ms
overlay实现
#1.启动consul服务,实现网络的统一配置管理
[root@docker1 ~]# docker container run -d -p 8500:8500 -h consul progrium/consul -server -bootstrap
#2.修改配置文件
{
"registry-mirrors" : [
"https://registry.docker-cn.com",
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com",
"https://cr.console.aliyun.com/"],
"insecure-registries":["192.168.88.101:5000","192.168.88.101"],
"live-restore":true,
#以下为新增加配置
"hosts":["tcp://0.0.0.0:2376","unix:///var/run/docker.sock"],
"cluster-store":"consul://192.168.88.101:8500",
"cluster-advertise":"192.168.88.101:2376"
}
错误:
#修改/etc/docker/daemon.json中的hosts配置后,docker服务无法启动
Oct 26 22:29:18 docker dockerd: unable to configure the Docker daemon with file /etc/docker/daemon.json: the following directives are specified both as a flag and in the configuration file: hosts: (from flag: [fd://], from file: [tcp://0.0.0.0:2375 unix:///var/run/docker.sock])
解决方法:
#修改/usr/lib/systemd/system/docker.service文件中的ExecStart参数
#将ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock修改为以下
ExecStart=/usr/bin/dockerd
[root@docker ~]# systemctl daemon-reload
[root@docker ~]# systemctl start docker