How to Set Up Docker Port Forwarding with iptables

On a Kubernetes node running Calico, start by looking at the nat-table PREROUTING chain and the Calico chain it jumps to:
root@ #  iptables -vnL cali-PREROUTING  -t nat
Chain cali-PREROUTING (1 references)
 pkts bytes target     prot opt in     out     source               destination         
 105K   15M cali-fip-dnat  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:r6XmIziWUJsdOK6Z */
root@ #  iptables -vnL PREROUTING  -t nat
Chain PREROUTING (policy ACCEPT 33 packets, 2938 bytes)
 pkts bytes target     prot opt in     out     source               destination         
  588 49101 cali-PREROUTING  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:6gwbT8clXdHdC1b1 */
  578 48301 KUBE-SERVICES  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes service portals */
 7144  430K CNI-HOSTPORT-DNAT  all  --  *      *       0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
 5258  316K DOCKER     all  --  *      *       0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
root@ #

Now list the PREROUTING rules with line numbers. Rule 2 is a manually added DNAT that maps host port 6000 (10.10.16.48) to the container at 172.17.0.4:6000. Delete it, check the Calico chains, and confirm that nothing on the host itself is listening on port 6000:
root # iptables -t nat  -L PREROUTING  -n --line-number
Chain PREROUTING (policy ACCEPT)
num  target     prot opt source               destination         
1    cali-PREROUTING  all  --  0.0.0.0/0            0.0.0.0/0            /* cali:6gwbT8clXdHdC1b1 */
2    DNAT       tcp  --  0.0.0.0/0            10.10.16.48          tcp dpt:6000 to:172.17.0.4:6000
3    KUBE-SERVICES  all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes service portals */
4    CNI-HOSTPORT-DNAT  all  --  0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
5    DOCKER     all  --  0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
root # iptables -t nat  -D PREROUTING 2
root # iptables -t nat  -L cali-PREROUTING -n --line-number
Chain cali-PREROUTING (1 references)
num  target     prot opt source               destination         
1    cali-fip-dnat  all  --  0.0.0.0/0            0.0.0.0/0            /* cali:r6XmIziWUJsdOK6Z */
root # iptables -t nat  -L   cali-fip-dnat  -n --line-number
Chain cali-fip-dnat (2 references)
num  target     prot opt source               destination         
root # netstat -lpn | grep 6000
root #

With the DNAT rule removed, PREROUTING again contains only the Calico, kube-proxy, CNI host-port, and Docker entries:
root@ # iptables -t nat  -L PREROUTING  -n --line-number
Chain PREROUTING (policy ACCEPT)
num  target     prot opt source               destination         
1    cali-PREROUTING  all  --  0.0.0.0/0            0.0.0.0/0            /* cali:6gwbT8clXdHdC1b1 */
2    KUBE-SERVICES  all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes service portals */
3    CNI-HOSTPORT-DNAT  all  --  0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
4    DOCKER     all  --  0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
root # iptables -t nat  -L KUBE-SERVICES -n --line-number
Chain KUBE-SERVICES (2 references)
num  target     prot opt source               destination         
1    KUBE-MARK-MASQ  tcp  -- !10.244.0.0/16        10.96.0.1            /* default/kubernetes:https cluster IP */ tcp dpt:443
2    KUBE-SVC-NPX46M4PTMTKRN6Y  tcp  --  0.0.0.0/0            10.96.0.1            /* default/kubernetes:https cluster IP */ tcp dpt:443
3    KUBE-MARK-MASQ  udp  -- !10.244.0.0/16        10.96.0.10           /* kube-system/kube-dns:dns cluster IP */ udp dpt:53
4    KUBE-SVC-TCOU7JCQXEZGVUNU  udp  --  0.0.0.0/0            10.96.0.10           /* kube-system/kube-dns:dns cluster IP */ udp dpt:53
5    KUBE-MARK-MASQ  tcp  -- !10.244.0.0/16        10.96.0.10           /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53
6    KUBE-SVC-ERIFXISQEP7F7OF4  tcp  --  0.0.0.0/0            10.96.0.10           /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53
7    KUBE-MARK-MASQ  tcp  -- !10.244.0.0/16        10.96.0.10           /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153
8    KUBE-SVC-JD5MR3NA4I4DYORP  tcp  --  0.0.0.0/0            10.96.0.10           /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153
9    KUBE-NODEPORTS  all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL
root@pcl-01:/home/pcl# iptables -t nat  -L  CNI-HOSTPORT-DNAT -n --line-number
Chain CNI-HOSTPORT-DNAT (2 references)
num  target     prot opt source               destination         
root@pcl-01:/home/pcl# iptables -t nat  -L  DOCKER  -n --line-number
Chain DOCKER (2 references)
num  target     prot opt source               destination         
1    RETURN     all  --  0.0.0.0/0            0.0.0.0/0           
root #

For example, whenever we use a Docker container, we are sure to find a rule like this in the iptables nat table:

-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER

At a high level, this rule says: when a packet entering the nat table's PREROUTING chain matches the given criteria, jump straight to a chain named DOCKER. Exactly which NAT rules then take effect inside that DOCKER chain is not our topic here.

What we mainly want to analyze is how to read the "-m addrtype --dst-type" matching criterion.

First, -m addrtype.

iptables ships many extension modules that add extra functionality. addrtype is one of them; it provides an "address type" match. A module is pulled in with -m <module-name>.

Probably the most common use of an extension module is a rule in the INPUT chain like this:

-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT

Note: as for the difference between the conntrack and state modules, for this kind of match the effect is identical; the state match is effectively an alias for a subset of conntrack's functionality.

Next, --dst-type.

We can read the addrtype module's usage text to learn how the --dst-type option is used and which values it accepts.
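A quick way to pull up that usage text (a sketch; the exact output wording varies by iptables version):

# print the addrtype match's options, including --src-type and --dst-type
iptables -m addrtype --help

# per iptables-extensions(8), the recognized address types include:
#   UNSPEC, UNICAST, LOCAL, BROADCAST, ANYCAST, MULTICAST,
#   BLACKHOLE, UNREACHABLE, PROHIBIT, NAT, XRESOLVE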

As the usage text shows, the module can match on either the source or the destination address type, and it supports many address types: LOCAL means a local network address of the host itself, BROADCAST matches broadcast addresses, and there are several other special-purpose types.

Returning to the rule at the beginning: its effect is that when a packet entering the nat table's PREROUTING chain has a destination address that is one of the host's own local addresses, it jumps straight to the chain named DOCKER.
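
To see concretely which destination addresses "--dst-type LOCAL" covers on a given host, you can list the kernel's local routing table (a quick check, not from the listings above):

# every entry of type "local" in this table is an address the kernel
# treats as the host's own; those are the destinations that
# "--dst-type LOCAL" matches
ip route show table local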


root # iptables -S
-P INPUT ACCEPT
-P FORWARD DROP            # the default policy here is DROP
-P OUTPUT ACCEPT
-N DOCKER
-N DOCKER-ISOLATION-STAGE-1
-N DOCKER-ISOLATION-STAGE-2
-N DOCKER-USER
-N KUBE-EXTERNAL-SERVICES
-N KUBE-FIREWALL
-N KUBE-FORWARD
-N KUBE-SERVICES
-N cali-FORWARD
-N cali-INPUT
-N cali-OUTPUT
-N cali-failsafe-in
-N cali-failsafe-out
-N cali-from-hep-forward
-N cali-from-host-endpoint
-N cali-from-wl-dispatch
-N cali-fw-cali264323708e2
-N cali-fw-calic6133e3c424
-N cali-pri-_u2Tn2rSoAPffvE7JO6
-N cali-pri-kns.kube-system
-N cali-pro-_u2Tn2rSoAPffvE7JO6
-N cali-pro-kns.kube-system
-N cali-to-hep-forward
-N cali-to-host-endpoint
-N cali-to-wl-dispatch
-N cali-tw-cali264323708e2
-N cali-tw-calic6133e3c424
-N cali-wl-to-host
-A INPUT -m comment --comment "cali:Cz_u1IQiXIMmKD4c" -j cali-INPUT
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A INPUT -j KUBE-FIREWALL
-A FORWARD -m comment --comment "cali:wUHhoiAYhphO9Mso" -j cali-FORWARD
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A FORWARD -j DOCKER-USER
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -s 10.244.0.0/16 -j ACCEPT
-A FORWARD -d 10.244.0.0/16 -j ACCEPT
-A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -j KUBE-FIREWALL
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN

Add the rules. The PREROUTING DNAT for port 6000 is put back (it shows up again in the listing below), and because the filter table's FORWARD policy is DROP, an ACCEPT rule must also be appended to the DOCKER chain so the forwarded traffic can reach the container:

root # iptables -A DOCKER -d 172.17.0.4/32 ! -i docker0 -o docker0 -p tcp -m tcp --dport 6000 -j ACCEPT
root # iptables -vnL  PREROUTING -t nat
Chain PREROUTING (policy ACCEPT 4 packets, 160 bytes)
 pkts bytes target     prot opt in     out     source               destination         
 3169  228K cali-PREROUTING  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* cali:6gwbT8clXdHdC1b1 */
   27  1620 DNAT       tcp  --  *      *       0.0.0.0/0            10.10.16.48          tcp dpt:6000 to:172.17.0.4:6000
 5286  378K KUBE-SERVICES  all  --  *      *       0.0.0.0/0            0.0.0.0/0            /* kubernetes service portals */
 7147  430K CNI-HOSTPORT-DNAT  all  --  *      *       0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
 5261  316K DOCKER     all  --  *      *       0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL
root # iptables -vnL  DOCKER
Chain DOCKER (1 references)
 pkts bytes target     prot opt in     out     source               destination         
    1    60 ACCEPT     tcp  --  !docker0 docker0  0.0.0.0/0            172.17.0.4           tcp dpt:6000
root@pcl-01:/home/pcl#
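
Putting the two halves together, a minimal hand-rolled port mapping for this setup (host 10.10.16.48, container 172.17.0.4 behind docker0) is just one DNAT rule plus one forward-accept rule. A sketch, matching the rule positions in the listings above:

# nat table: rewrite packets arriving at host port 6000 so they go to the
# container (inserted at position 2, where it appeared in the earlier listing)
iptables -t nat -I PREROUTING 2 -p tcp -d 10.10.16.48 --dport 6000 \
    -j DNAT --to-destination 172.17.0.4:6000

# filter table: FORWARD's policy is DROP, so explicitly accept the
# DNAT-ed traffic being forwarded into the container via docker0
iptables -A DOCKER -d 172.17.0.4/32 ! -i docker0 -o docker0 \
    -p tcp -m tcp --dport 6000 -j ACCEPT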

client

[figure: client-side connection test screenshot]

The conntrack table on the node shows the tracked, NAT'd flow:

conntrack -L | grep 10.10.16.81
tcp      6 86259 ESTABLISHED src=10.10.16.81 dst=10.10.16.48 sport=51030 dport=6000 src=172.17.0.4 dst=10.10.16.81 sport=6000 dport=51030 [ASSURED] mark=0 use=1
conntrack v1.4.4 (conntrack-tools): 206 flow entries have been shown.

The entry's two tuples show the DNAT at work: in the original direction the client talks to the host (10.10.16.81 -> 10.10.16.48:6000), while replies are expected from the container (172.17.0.4:6000 -> 10.10.16.81).
server

[figure: server-side capture screenshot]

Problem
Pinning a Docker container to a fixed IP requires setting its network to none, and in that scenario all of Docker's own network configuration is disabled, including -p port mapping.

Goal
Do the port mapping by some other means, bypassing net=none.

Method
Docker's port mapping is not implemented inside Docker itself; it is done through the host's iptables. We can therefore drive the bridge ourselves to set up the mapping, much like configuring port forwarding on a router.

First, check what iptables rules get created when a port mapping is configured.
Run: docker run -d -p 9000:9000 redis_cluster 9000

root@ubuntu:~# iptables -t nat -L -n
Chain PREROUTING (policy ACCEPT)
target      prot opt source               destination
DOCKER      all  --  0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL

Chain INPUT (policy ACCEPT)
target      prot opt source               destination

Chain OUTPUT (policy ACCEPT)
target      prot opt source               destination
DOCKER      all  --  0.0.0.0/0           !127.0.0.0/8          ADDRTYPE match dst-type LOCAL

Chain POSTROUTING (policy ACCEPT)
target      prot opt source               destination
MASQUERADE  all  --  172.17.0.0/16        0.0.0.0/0
MASQUERADE  tcp  --  172.17.0.1           172.17.0.1           tcp dpt:9000

Chain DOCKER (2 references)
target      prot opt source               destination
DNAT        tcp  --  0.0.0.0/0            0.0.0.0/0            tcp dpt:9000 to:172.17.0.1:9000

So we can write our own DNAT rule to translate an external port. Docker creates a custom chain named DOCKER for exactly this; the advantage of custom iptables chains is that they make the firewall policy more layered and easier to manage.

View the rules:
root@ubuntu:~# iptables -S
-P INPUT ACCEPT
-P FORWARD ACCEPT
-P OUTPUT ACCEPT
-N DOCKER
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A DOCKER -d 172.17.0.1/32 ! -i docker0 -o docker0 -p tcp -m tcp --dport 9000 -j ACCEPT
root@ubuntu:~# iptables -t nat -S
-P PREROUTING ACCEPT
-P INPUT ACCEPT
-P OUTPUT ACCEPT
-P POSTROUTING ACCEPT
-N DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -p tcp -m tcp --dport 80 -j DNAT --to-destination 172.17.0.1:80
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -s 172.17.0.1/32 -d 172.17.0.1/32 -p tcp -m tcp --dport 9000 -j MASQUERADE
-A DOCKER -p tcp -m tcp --dport 9000 -j DNAT --to-destination 172.17.0.1:9000

Running the DNAT command iptables -t nat -A PREROUTING -p tcp --dport 80 -j DNAT --to-destination 172.17.0.1:80 maps the host's port 80 to port 80 of the Docker container at 172.17.0.1.
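
Since nat PREROUTING already jumps to the DOCKER chain for every locally destined address, an alternative that keeps Docker's own layering is to append the DNAT rule to the DOCKER chain instead of PREROUTING (a sketch; 172.17.0.1 is the container address from the listings above):

# same effect as the PREROUTING rule above, but kept inside Docker's
# custom chain, next to the rule Docker generated for port 9000
iptables -t nat -A DOCKER -p tcp --dport 80 -j DNAT --to-destination 172.17.0.1:80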

Watching the conntrack table on the Kubernetes node over time shows the tracked flows and their timers counting down:

root # conntrack -L  | grep 10.10.16.81
tcp      6 86259 ESTABLISHED src=10.10.16.81 dst=10.10.16.48 sport=51030 dport=6000 src=172.17.0.4 dst=10.10.16.81 sport=6000 dport=51030 [ASSURED] mark=0 use=1
conntrack v1.4.4 (conntrack-tools): 206 flow entries have been shown.
root # conntrack -L  | grep 172.17.0.4
tcp      6 83 TIME_WAIT src=10.10.16.1 dst=10.10.16.48 sport=7471 dport=6000 src=172.17.0.4 dst=10.10.16.1 sport=6000 dport=7471 [ASSURED] mark=0 use=1
conntrack v1.4.4 (conntrack-tools): 206 flow entries have been shown.
root # conntrack -L  | grep 172.17.0.4
tcp      6 69 TIME_WAIT src=10.10.16.1 dst=10.10.16.48 sport=7471 dport=6000 src=172.17.0.4 dst=10.10.16.1 sport=6000 dport=7471 [ASSURED] mark=0 use=1
conntrack v1.4.4 (conntrack-tools): 206 flow entries have been shown.
root # conntrack -L  | grep 172.17.0.4
tcp      6 67 TIME_WAIT src=10.10.16.1 dst=10.10.16.48 sport=7471 dport=6000 src=172.17.0.4 dst=10.10.16.1 sport=6000 dport=7471 [ASSURED] mark=0 use=1
conntrack v1.4.4 (conntrack-tools): 206 flow entries have been shown.
root # conntrack -L  | grep 172.17.0.4
tcp      6 55 TIME_WAIT src=10.10.16.1 dst=10.10.16.48 sport=7471 dport=6000 src=172.17.0.4 dst=10.10.16.1 sport=6000 dport=7471 [ASSURED] mark=0 use=1
conntrack v1.4.4 (conntrack-tools): 206 flow entries have been shown.
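
One operational note: NAT decisions are cached in conntrack, so after DNAT rules change, an existing flow keeps its old mapping until the entry times out (as with the TIME_WAIT timers counting down above). If that gets in the way, stale entries can be deleted by hand with conntrack-tools; a sketch:

# delete tracked flows whose original destination port is 6000
conntrack -D -p tcp --dport 6000

# or flush the whole table (disruptive: established flows lose their NAT state)
conntrack -F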