Configuring endpointRoutes mode in the PRE environment
- Initialization command
# pre
helm install cilium cilium/cilium --version 1.9.10 \
  --namespace kube-system \
  --set tunnel=disabled \
  --set endpointRoutes.enabled=true \
  --set bpf.hostRouting=true \
  --set ipMasqAgent.enabled=true \
  --set prometheus.enabled=true \
  --set operator.prometheus.enabled=true \
  --set kubeProxyReplacement=strict \
  --set loadBalancer.mode=hybrid \
  --set hostServices.enabled=true \
  --set nativeRoutingCIDR=172.20.0.0/20 \
  --set ipam.mode=kubernetes \
  --set ipam.operator.clusterPoolIPv4PodCIDR=172.20.0.0/20 \
  --set ipam.operator.clusterPoolIPv4MaskSize=24 \
  --set k8sServiceHost=pre-apiserver.qiangyun.com \
  --set k8sServicePort=6443
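These --set values are rendered into the cilium-config ConfigMap that every agent reads at startup, which is why the flag dump in the log below mirrors them (for example --enable-endpoint-routes='true' and --native-routing-cidr='172.20.0.0/20'). A minimal post-install sanity check, assuming kubectl access to kube-system; the grep patterns are illustrative, not exhaustive:

# confirm the rendered agent configuration
kubectl -n kube-system get configmap cilium-config -o yaml | grep -E 'tunnel|enable-endpoint-routes|masquerade|kube-proxy-replacement|ipam|native-routing-cidr'
# ask a running agent for its own view (requires the cilium DaemonSet to be Ready)
kubectl -n kube-system exec ds/cilium -- cilium status | grep -E 'KubeProxyReplacement|Masquerading|IPAM'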
- cilium-agent log output
<root@PRE-BE-K8S-WN2 ~># docker logs -f 99fa level=info msg="Skipped reading configuration file" reason="Config File \"ciliumd\" Not Found in \"[/root]\"" subsys=config level=info msg="Started gops server" address="127.0.0.1:9890" subsys=daemon level=info msg="Memory available for map entries (0.003% of 33130958848B): 82827397B" subsys=config level=info msg="option bpf-ct-global-tcp-max set by dynamic sizing to 290622" subsys=config level=info msg="option bpf-ct-global-any-max set by dynamic sizing to 145311" subsys=config level=info msg="option bpf-nat-global-max set by dynamic sizing to 290622" subsys=config level=info msg="option bpf-neigh-global-max set by dynamic sizing to 290622" subsys=config level=info msg="option bpf-sock-rev-map-max set by dynamic sizing to 145311" subsys=config level=info msg=" --agent-health-port='9876'" subsys=daemon level=info msg=" --agent-labels=''" subsys=daemon level=info msg=" --allow-icmp-frag-needed='true'" subsys=daemon level=info msg=" --allow-localhost='auto'" subsys=daemon level=info msg=" --annotate-k8s-node='true'" subsys=daemon level=info msg=" --api-rate-limit='map[]'" subsys=daemon level=info msg=" --arping-refresh-period='5m0s'" subsys=daemon level=info msg=" --auto-create-cilium-node-resource='true'" subsys=daemon level=info msg=" --auto-direct-node-routes='false'" subsys=daemon level=info msg=" --blacklist-conflicting-routes='false'" subsys=daemon level=info msg=" --bpf-compile-debug='false'" subsys=daemon level=info msg=" --bpf-ct-global-any-max='262144'" subsys=daemon level=info msg=" --bpf-ct-global-tcp-max='524288'" subsys=daemon level=info msg=" --bpf-ct-timeout-regular-any='1m0s'" subsys=daemon level=info msg=" --bpf-ct-timeout-regular-tcp='6h0m0s'" subsys=daemon level=info msg=" --bpf-ct-timeout-regular-tcp-fin='10s'" subsys=daemon level=info msg=" --bpf-ct-timeout-regular-tcp-syn='1m0s'" subsys=daemon level=info msg=" --bpf-ct-timeout-service-any='1m0s'" subsys=daemon level=info msg=" --bpf-ct-timeout-service-tcp='6h0m0s'" subsys=daemon level=info msg=" --bpf-fragments-map-max='8192'" subsys=daemon level=info msg=" --bpf-lb-acceleration='disabled'" subsys=daemon level=info msg=" --bpf-lb-algorithm='random'" subsys=daemon level=info msg=" --bpf-lb-maglev-hash-seed='JLfvgnHc2kaSUFaI'" subsys=daemon level=info msg=" --bpf-lb-maglev-table-size='16381'" subsys=daemon level=info msg=" --bpf-lb-map-max='65536'" subsys=daemon level=info msg=" --bpf-lb-mode='snat'" subsys=daemon level=info msg=" --bpf-map-dynamic-size-ratio='0.0025'" subsys=daemon level=info msg=" --bpf-nat-global-max='524288'" subsys=daemon level=info msg=" --bpf-neigh-global-max='524288'" subsys=daemon level=info msg=" --bpf-policy-map-max='16384'" subsys=daemon level=info msg=" --bpf-root=''" subsys=daemon level=info msg=" --bpf-sock-rev-map-max='262144'" subsys=daemon level=info msg=" --certificates-directory='/var/run/cilium/certs'" subsys=daemon level=info msg=" --cgroup-root='/run/cilium/cgroupv2'" subsys=daemon level=info msg=" --cluster-id=''" subsys=daemon level=info msg=" --cluster-name='default'" subsys=daemon level=info msg=" --clustermesh-config='/var/lib/cilium/clustermesh/'" subsys=daemon level=info msg=" --cmdref=''" subsys=daemon level=info msg=" --config=''" subsys=daemon level=info msg=" --config-dir='/tmp/cilium/config-map'" subsys=daemon level=info msg=" --conntrack-gc-interval='0s'" subsys=daemon level=info msg=" --crd-wait-timeout='5m0s'" subsys=daemon level=info msg=" --datapath-mode='veth'" subsys=daemon level=info msg=" --debug='false'" 
subsys=daemon level=info msg=" --debug-verbose=''" subsys=daemon level=info msg=" --device=''" subsys=daemon level=info msg=" --devices=''" subsys=daemon level=info msg=" --direct-routing-device=''" subsys=daemon level=info msg=" --disable-cnp-status-updates='true'" subsys=daemon level=info msg=" --disable-conntrack='false'" subsys=daemon level=info msg=" --disable-endpoint-crd='false'" subsys=daemon level=info msg=" --disable-envoy-version-check='false'" subsys=daemon level=info msg=" --disable-iptables-feeder-rules=''" subsys=daemon level=info msg=" --dns-max-ips-per-restored-rule='1000'" subsys=daemon level=info msg=" --egress-masquerade-interfaces=''" subsys=daemon level=info msg=" --egress-multi-home-ip-rule-compat='false'" subsys=daemon level=info msg=" --enable-auto-protect-node-port-range='true'" subsys=daemon level=info msg=" --enable-bandwidth-manager='false'" subsys=daemon level=info msg=" --enable-bpf-clock-probe='true'" subsys=daemon level=info msg=" --enable-bpf-masquerade='true'" subsys=daemon level=info msg=" --enable-bpf-tproxy='false'" subsys=daemon level=info msg=" --enable-endpoint-health-checking='true'" subsys=daemon level=info msg=" --enable-endpoint-routes='true'" subsys=daemon level=info msg=" --enable-external-ips='true'" subsys=daemon level=info msg=" --enable-health-check-nodeport='true'" subsys=daemon level=info msg=" --enable-health-checking='true'" subsys=daemon level=info msg=" --enable-host-firewall='false'" subsys=daemon level=info msg=" --enable-host-legacy-routing='true'" subsys=daemon level=info msg=" --enable-host-port='true'" subsys=daemon level=info msg=" --enable-host-reachable-services='true'" subsys=daemon level=info msg=" --enable-hubble='true'" subsys=daemon level=info msg=" --enable-identity-mark='true'" subsys=daemon level=info msg=" --enable-ip-masq-agent='true'" subsys=daemon level=info msg=" --enable-ipsec='false'" subsys=daemon level=info msg=" --enable-ipv4='true'" subsys=daemon level=info msg=" --enable-ipv4-fragment-tracking='true'" subsys=daemon level=info msg=" --enable-ipv6='false'" subsys=daemon level=info msg=" --enable-ipv6-ndp='false'" subsys=daemon level=info msg=" --enable-k8s-api-discovery='false'" subsys=daemon level=info msg=" --enable-k8s-endpoint-slice='true'" subsys=daemon level=info msg=" --enable-k8s-event-handover='false'" subsys=daemon level=info msg=" --enable-l7-proxy='true'" subsys=daemon level=info msg=" --enable-local-node-route='true'" subsys=daemon level=info msg=" --enable-local-redirect-policy='false'" subsys=daemon level=info msg=" --enable-monitor='true'" subsys=daemon level=info msg=" --enable-node-port='false'" subsys=daemon level=info msg=" --enable-policy='default'" subsys=daemon level=info msg=" --enable-remote-node-identity='true'" subsys=daemon level=info msg=" --enable-selective-regeneration='true'" subsys=daemon level=info msg=" --enable-session-affinity='true'" subsys=daemon level=info msg=" --enable-svc-source-range-check='true'" subsys=daemon level=info msg=" --enable-tracing='false'" subsys=daemon level=info msg=" --enable-well-known-identities='false'" subsys=daemon level=info msg=" --enable-xt-socket-fallback='true'" subsys=daemon level=info msg=" --encrypt-interface=''" subsys=daemon level=info msg=" --encrypt-node='false'" subsys=daemon level=info msg=" --endpoint-interface-name-prefix='lxc+'" subsys=daemon level=info msg=" --endpoint-queue-size='25'" subsys=daemon level=info msg=" --endpoint-status=''" subsys=daemon level=info msg=" --envoy-log=''" subsys=daemon level=info msg=" 
--exclude-local-address=''" subsys=daemon level=info msg=" --fixed-identity-mapping='map[]'" subsys=daemon level=info msg=" --flannel-master-device=''" subsys=daemon level=info msg=" --flannel-uninstall-on-exit='false'" subsys=daemon level=info msg=" --force-local-policy-eval-at-source='true'" subsys=daemon level=info msg=" --gops-port='9890'" subsys=daemon level=info msg=" --host-reachable-services-protos='tcp,udp'" subsys=daemon level=info msg=" --http-403-msg=''" subsys=daemon level=info msg=" --http-idle-timeout='0'" subsys=daemon level=info msg=" --http-max-grpc-timeout='0'" subsys=daemon level=info msg=" --http-normalize-path='true'" subsys=daemon level=info msg=" --http-request-timeout='3600'" subsys=daemon level=info msg=" --http-retry-count='3'" subsys=daemon level=info msg=" --http-retry-timeout='0'" subsys=daemon level=info msg=" --hubble-disable-tls='false'" subsys=daemon level=info msg=" --hubble-event-queue-size='0'" subsys=daemon level=info msg=" --hubble-flow-buffer-size='4095'" subsys=daemon level=info msg=" --hubble-listen-address=':4244'" subsys=daemon level=info msg=" --hubble-metrics=''" subsys=daemon level=info msg=" --hubble-metrics-server=''" subsys=daemon level=info msg=" --hubble-socket-path='/var/run/cilium/hubble.sock'" subsys=daemon level=info msg=" --hubble-tls-cert-file='/var/lib/cilium/tls/hubble/server.crt'" subsys=daemon level=info msg=" --hubble-tls-client-ca-files='/var/lib/cilium/tls/hubble/client-ca.crt'" subsys=daemon level=info msg=" --hubble-tls-key-file='/var/lib/cilium/tls/hubble/server.key'" subsys=daemon level=info msg=" --identity-allocation-mode='crd'" subsys=daemon level=info msg=" --identity-change-grace-period='5s'" subsys=daemon level=info msg=" --install-iptables-rules='true'" subsys=daemon level=info msg=" --ip-allocation-timeout='2m0s'" subsys=daemon level=info msg=" --ip-masq-agent-config-path='/etc/config/ip-masq-agent'" subsys=daemon level=info msg=" --ipam='kubernetes'" subsys=daemon level=info msg=" --ipsec-key-file=''" subsys=daemon level=info msg=" --iptables-lock-timeout='5s'" subsys=daemon level=info msg=" --iptables-random-fully='false'" subsys=daemon level=info msg=" --ipv4-node='auto'" subsys=daemon level=info msg=" --ipv4-pod-subnets=''" subsys=daemon level=info msg=" --ipv4-range='auto'" subsys=daemon level=info msg=" --ipv4-service-loopback-address='169.254.42.1'" subsys=daemon level=info msg=" --ipv4-service-range='auto'" subsys=daemon level=info msg=" --ipv6-cluster-alloc-cidr='f00d::/64'" subsys=daemon level=info msg=" --ipv6-mcast-device=''" subsys=daemon level=info msg=" --ipv6-node='auto'" subsys=daemon level=info msg=" --ipv6-pod-subnets=''" subsys=daemon level=info msg=" --ipv6-range='auto'" subsys=daemon level=info msg=" --ipv6-service-range='auto'" subsys=daemon level=info msg=" --ipvlan-master-device='undefined'" subsys=daemon level=info msg=" --join-cluster='false'" subsys=daemon level=info msg=" --k8s-api-server=''" subsys=daemon level=info msg=" --k8s-force-json-patch='false'" subsys=daemon level=info msg=" --k8s-heartbeat-timeout='30s'" subsys=daemon level=info msg=" --k8s-kubeconfig-path=''" subsys=daemon level=info msg=" --k8s-namespace='kube-system'" subsys=daemon level=info msg=" --k8s-require-ipv4-pod-cidr='false'" subsys=daemon level=info msg=" --k8s-require-ipv6-pod-cidr='false'" subsys=daemon level=info msg=" --k8s-service-cache-size='128'" subsys=daemon level=info msg=" --k8s-service-proxy-name=''" subsys=daemon level=info msg=" --k8s-sync-timeout='3m0s'" subsys=daemon level=info msg=" 
--k8s-watcher-endpoint-selector='metadata.name!=kube-scheduler,metadata.name!=kube-controller-manager,metadata.name!=etcd-operator,metadata.name!=gcp-controller-manager'" subsys=daemon level=info msg=" --k8s-watcher-queue-size='1024'" subsys=daemon level=info msg=" --keep-config='false'" subsys=daemon level=info msg=" --kube-proxy-replacement='strict'" subsys=daemon level=info msg=" --kube-proxy-replacement-healthz-bind-address=''" subsys=daemon level=info msg=" --kvstore=''" subsys=daemon level=info msg=" --kvstore-connectivity-timeout='2m0s'" subsys=daemon level=info msg=" --kvstore-lease-ttl='15m0s'" subsys=daemon level=info msg=" --kvstore-opt='map[]'" subsys=daemon level=info msg=" --kvstore-periodic-sync='5m0s'" subsys=daemon level=info msg=" --label-prefix-file=''" subsys=daemon level=info msg=" --labels=''" subsys=daemon level=info msg=" --lib-dir='/var/lib/cilium'" subsys=daemon level=info msg=" --log-driver=''" subsys=daemon level=info msg=" --log-opt='map[]'" subsys=daemon level=info msg=" --log-system-load='false'" subsys=daemon level=info msg=" --masquerade='true'" subsys=daemon level=info msg=" --max-controller-interval='0'" subsys=daemon level=info msg=" --metrics=''" subsys=daemon level=info msg=" --monitor-aggregation='medium'" subsys=daemon level=info msg=" --monitor-aggregation-flags='all'" subsys=daemon level=info msg=" --monitor-aggregation-interval='5s'" subsys=daemon level=info msg=" --monitor-queue-size='0'" subsys=daemon level=info msg=" --mtu='0'" subsys=daemon level=info msg=" --nat46-range='0:0:0:0:0:FFFF::/96'" subsys=daemon level=info msg=" --native-routing-cidr='172.20.0.0/20'" subsys=daemon level=info msg=" --node-port-acceleration='disabled'" subsys=daemon level=info msg=" --node-port-algorithm='random'" subsys=daemon level=info msg=" --node-port-bind-protection='true'" subsys=daemon level=info msg=" --node-port-mode='hybrid'" subsys=daemon level=info msg=" --node-port-range='30000,32767'" subsys=daemon level=info msg=" --policy-audit-mode='false'" subsys=daemon level=info msg=" --policy-queue-size='100'" subsys=daemon level=info msg=" --policy-trigger-interval='1s'" subsys=daemon level=info msg=" --pprof='false'" subsys=daemon level=info msg=" --preallocate-bpf-maps='false'" subsys=daemon level=info msg=" --prefilter-device='undefined'" subsys=daemon level=info msg=" --prefilter-mode='native'" subsys=daemon level=info msg=" --prepend-iptables-chains='true'" subsys=daemon level=info msg=" --prometheus-serve-addr=':9090'" subsys=daemon level=info msg=" --proxy-connect-timeout='1'" subsys=daemon level=info msg=" --proxy-prometheus-port='9095'" subsys=daemon level=info msg=" --read-cni-conf=''" subsys=daemon level=info msg=" --restore='true'" subsys=daemon level=info msg=" --sidecar-istio-proxy-image='cilium/istio_proxy'" subsys=daemon level=info msg=" --single-cluster-route='false'" subsys=daemon level=info msg=" --skip-crd-creation='false'" subsys=daemon level=info msg=" --socket-path='/var/run/cilium/cilium.sock'" subsys=daemon level=info msg=" --sockops-enable='false'" subsys=daemon level=info msg=" --state-dir='/var/run/cilium'" subsys=daemon level=info msg=" --tofqdns-dns-reject-response-code='refused'" subsys=daemon level=info msg=" --tofqdns-enable-dns-compression='true'" subsys=daemon level=info msg=" --tofqdns-endpoint-max-ip-per-hostname='50'" subsys=daemon level=info msg=" --tofqdns-idle-connection-grace-period='0s'" subsys=daemon level=info msg=" --tofqdns-max-deferred-connection-deletes='10000'" subsys=daemon level=info msg=" 
--tofqdns-min-ttl='0'" subsys=daemon level=info msg=" --tofqdns-pre-cache=''" subsys=daemon level=info msg=" --tofqdns-proxy-port='0'" subsys=daemon level=info msg=" --tofqdns-proxy-response-max-delay='100ms'" subsys=daemon level=info msg=" --trace-payloadlen='128'" subsys=daemon level=info msg=" --tunnel='disabled'" subsys=daemon level=info msg=" --version='false'" subsys=daemon level=info msg=" --write-cni-conf-when-ready=''" subsys=daemon level=info msg=" _ _ _" subsys=daemon level=info msg=" ___|_| |_|_ _ _____" subsys=daemon level=info msg="| _| | | | | | |" subsys=daemon level=info msg="|___|_|_|_|___|_|_|_|" subsys=daemon level=info msg="Cilium 1.9.10 4e26039 2021-09-01T12:57:41-07:00 go version go1.15.15 linux/amd64" subsys=daemon level=info msg="cilium-envoy version: 9b1701da9cc035a1696f3e492ee2526101262e56/1.18.4/Distribution/RELEASE/BoringSSL" subsys=daemon level=info msg="clang (10.0.0) and kernel (5.11.1) versions: OK!" subsys=linux-datapath level=info msg="linking environment: OK!" subsys=linux-datapath level=info msg="Detected mounted BPF filesystem at /sys/fs/bpf" subsys=bpf level=info msg="Mounted cgroupv2 filesystem at /run/cilium/cgroupv2" subsys=cgroups level=info msg="Parsing base label prefixes from default label list" subsys=labels-filter level=info msg="Parsing additional label prefixes from user inputs: []" subsys=labels-filter level=info msg="Final label prefixes to be used for identity evaluation:" subsys=labels-filter level=info msg=" - reserved:.*" subsys=labels-filter level=info msg=" - :io.kubernetes.pod.namespace" subsys=labels-filter level=info msg=" - :io.cilium.k8s.namespace.labels" subsys=labels-filter level=info msg=" - :app.kubernetes.io" subsys=labels-filter level=info msg=" - !:io.kubernetes" subsys=labels-filter level=info msg=" - !:kubernetes.io" subsys=labels-filter level=info msg=" - !:.*beta.kubernetes.io" subsys=labels-filter level=info msg=" - !:k8s.io" subsys=labels-filter level=info msg=" - !:pod-template-generation" subsys=labels-filter level=info msg=" - !:pod-template-hash" subsys=labels-filter level=info msg=" - !:controller-revision-hash" subsys=labels-filter level=info msg=" - !:annotation.*" subsys=labels-filter level=info msg=" - !:etcd_node" subsys=labels-filter level=info msg="Auto-disabling \"enable-bpf-clock-probe\" feature since KERNEL_HZ cannot be determined" error="Cannot probe CONFIG_HZ" subsys=daemon level=info msg="Using autogenerated IPv4 allocation range" subsys=node v4Prefix=10.78.0.0/16 level=info msg="Initializing daemon" subsys=daemon level=info msg="Establishing connection to apiserver" host="https://pre-apiserver.qiangyun.com:6443" subsys=k8s level=info msg="Connected to apiserver" subsys=k8s level=info msg="Trying to auto-enable \"enable-node-port\", \"enable-external-ips\", \"enable-host-reachable-services\", \"enable-host-port\", \"enable-session-affinity\" features" subsys=daemon level=info msg="Inheriting MTU from external network interface" device=eth0 ipAddr=10.1.20.78 mtu=1500 subsys=mtu level=info msg="Restored services from maps" failed=0 restored=0 subsys=service level=info msg="Reading old endpoints..." subsys=daemon level=info msg="Envoy: Starting xDS gRPC server listening on /var/run/cilium/xds.sock" subsys=envoy-manager level=info msg="No old endpoints found." 
subsys=daemon level=info msg="Waiting until all Cilium CRDs are available" subsys=k8s level=info msg="All Cilium CRDs have been found and are available" subsys=k8s level=info msg="Retrieved node information from kubernetes node" nodeName=pre-be-k8s-wn2 subsys=k8s level=info msg="Received own node information from API server" ipAddr.ipv4=10.1.20.78 ipAddr.ipv6="<nil>" k8sNodeIP=10.1.20.78 labels="map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/env:pre kubernetes.io/hostname:pre-be-k8s-wn2 kubernetes.io/ingress:pre kubernetes.io/os:linux kubernetes.io/resource:pre-base node-role.kubernetes.io/worker:worker topology.diskplugin.csi.alibabacloud.com/zone:cn-hangzhou-h]" nodeName=pre-be-k8s-wn2 subsys=k8s v4Prefix=172.20.4.0/24 v6Prefix="<nil>" level=info msg="Restored router IPs from node information" ipv4=172.20.4.166 ipv6="<nil>" subsys=k8s level=info msg="k8s mode: Allowing localhost to reach local endpoints" subsys=daemon level=info msg="Using auto-derived devices to attach Loadbalancer, Host Firewall or Bandwidth Manager program" devices="[eth0]" directRoutingDevice=eth0 subsys=daemon level=info msg="Enabling k8s event listener" subsys=k8s-watcher level=info msg="Policy Add Request" ciliumNetworkPolicy="[&{EndpointSelector:{\"matchLabels\":{\"k8s:io.kubernetes.pod.namespace\":\"fleet-system\"}} NodeSelector:{} Ingress:[{IngressCommonRule:{FromEndpoints:[{}] FromRequires:[] FromCIDR: FromCIDRSet:[] FromEntities:[] AggregatedSelectors:[]} ToPorts:[]}] IngressDeny:[] Egress:[{EgressCommonRule:{ToEndpoints:[{}] ToRequires:[] ToCIDR: ToCIDRSet:[] ToEntities:[] ToServices:[] ToGroups:[] AggregatedSelectors:[]} ToPorts:[] ToFQDNs:[]}] EgressDeny:[] Labels:[k8s:io.cilium.k8s.policy.derived-from=NetworkPolicy k8s:io.cilium.k8s.policy.name=default-allow-all k8s:io.cilium.k8s.policy.namespace=fleet-system k8s:io.cilium.k8s.policy.uid=6ad94b4d-1f91-44b6-9306-a12fbcfe4f6a] Description:}]" policyAddRequest=a73f43c1-1490-11ec-9d49-00163e18cc27 subsys=daemon level=info msg="Policy imported via API, recalculating..." policyAddRequest=a73f43c1-1490-11ec-9d49-00163e18cc27 policyRevision=2 subsys=daemon level=warning msg="Failed to send policy update as monitor notification" policyAddRequest=a73f43c1-1490-11ec-9d49-00163e18cc27 policyRevision=2 subsys=daemon level=info msg="NetworkPolicy successfully added" k8sApiVersion= k8sNetworkPolicyName=default-allow-all subsys=k8s-watcher level=info msg="Waiting until all pre-existing resources related to policy have been received" subsys=k8s-watcher level=info msg="Removing stale endpoint interfaces" subsys=daemon level=info msg="Skipping kvstore configuration" subsys=daemon level=info msg="Initializing node addressing" subsys=daemon level=info msg="Initializing kubernetes IPAM" subsys=ipam v4Prefix=172.20.4.0/24 v6Prefix="<nil>" level=info msg="Restoring endpoints..." 
subsys=daemon level=info msg="Endpoints restored" failed=0 restored=0 subsys=daemon level=info msg="Addressing information:" subsys=daemon level=info msg=" Cluster-Name: default" subsys=daemon level=info msg=" Cluster-ID: 0" subsys=daemon level=info msg=" Local node-name: pre-be-k8s-wn2" subsys=daemon level=info msg=" Node-IPv6: <nil>" subsys=daemon level=info msg=" External-Node IPv4: 10.1.20.78" subsys=daemon level=info msg=" Internal-Node IPv4: 172.20.4.166" subsys=daemon level=info msg=" IPv4 allocation prefix: 172.20.4.0/24" subsys=daemon level=info msg=" IPv4 native routing prefix: 172.20.0.0/20" subsys=daemon level=info msg=" Loopback IPv4: 169.254.42.1" subsys=daemon level=info msg=" Local IPv4 addresses:" subsys=daemon level=info msg=" - 10.1.20.78" subsys=daemon level=info msg="Adding local node to cluster" node="{pre-be-k8s-wn2 default [{InternalIP 10.1.20.78} {CiliumInternalIP 172.20.4.166}] 172.20.4.0/24 <nil> 172.20.4.189 <nil> 0 local 0 map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/env:pre kubernetes.io/hostname:pre-be-k8s-wn2 kubernetes.io/ingress:pre kubernetes.io/os:linux kubernetes.io/resource:pre-base node-role.kubernetes.io/worker:worker topology.diskplugin.csi.alibabacloud.com/zone:cn-hangzhou-h] 6}" subsys=nodediscovery level=info msg="Creating or updating CiliumNode resource" node=pre-be-k8s-wn2 subsys=nodediscovery level=info msg="Annotating k8s node" subsys=daemon v4CiliumHostIP.IPv4=172.20.4.166 v4Prefix=172.20.4.0/24 v4healthIP.IPv4=172.20.4.189 v6CiliumHostIP.IPv6="<nil>" v6Prefix="<nil>" v6healthIP.IPv6="<nil>" level=info msg="Initializing identity allocator" subsys=identity-cache level=info msg="Cluster-ID is not specified, skipping ClusterMesh initialization" subsys=daemon level=info msg="Setting up BPF datapath" bpfClockSource=ktime bpfInsnSet=v3 subsys=datapath-loader level=info msg="Setting sysctl" subsys=datapath-loader sysParamName=net.core.bpf_jit_enable sysParamValue=1 level=info msg="Setting sysctl" subsys=datapath-loader sysParamName=net.ipv4.conf.all.rp_filter sysParamValue=0 level=info msg="Setting sysctl" subsys=datapath-loader sysParamName=kernel.unprivileged_bpf_disabled sysParamValue=1 level=info msg="Setting sysctl" subsys=datapath-loader sysParamName=kernel.timer_migration sysParamValue=0 level=info msg="regenerating all endpoints" reason="one or more identities created or deleted" subsys=endpoint-manager level=info msg="All pre-existing resources related to policy have been received; continuing" subsys=k8s-watcher level=info msg="regenerating all endpoints" reason="one or more identities created or deleted" subsys=endpoint-manager level=info msg="regenerating all endpoints" reason= subsys=endpoint-manager level=info msg="Adding new proxy port rules for cilium-dns-egress:45583" proxy port name=cilium-dns-egress subsys=proxy level=info msg="Serving cilium node monitor v1.2 API at unix:///var/run/cilium/monitor1_2.sock" subsys=monitor-agent level=info msg="Validating configured node address ranges" subsys=daemon level=info msg="Starting connection tracking garbage collector" subsys=daemon level=info msg="Starting IP identity watcher" subsys=ipcache level=info msg="Initial scan of connection tracking completed" subsys=ct-gc level=info msg="Regenerating restored endpoints" numRestored=0 subsys=daemon level=info msg="Datapath signal listener running" subsys=signal level=info msg="Creating host endpoint" subsys=daemon level=info msg="Finished regenerating restored endpoints" regenerated=0 
subsys=daemon total=0 level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=796 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=796 identityLabels="k8s:node-role.kubernetes.io/worker=worker,k8s:topology.diskplugin.csi.alibabacloud.com/zone=cn-hangzhou-h,reserved:host" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=796 identity=1 identityLabels="k8s:node-role.kubernetes.io/worker=worker,k8s:topology.diskplugin.csi.alibabacloud.com/zone=cn-hangzhou-h,reserved:host" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Config file not found" file-path=/etc/config/ip-masq-agent subsys=ipmasq level=info msg="Adding CIDR" cidr=198.51.100.0/24 subsys=ipmasq level=info msg="Adding CIDR" cidr=203.0.113.0/24 subsys=ipmasq level=info msg="Adding CIDR" cidr=192.88.99.0/24 subsys=ipmasq level=info msg="Adding CIDR" cidr=100.64.0.0/10 subsys=ipmasq level=info msg="Adding CIDR" cidr=198.18.0.0/15 subsys=ipmasq level=info msg="Adding CIDR" cidr=10.0.0.0/8 subsys=ipmasq level=info msg="Adding CIDR" cidr=192.168.0.0/16 subsys=ipmasq level=info msg="Adding CIDR" cidr=240.0.0.0/4 subsys=ipmasq level=info msg="Adding CIDR" cidr=172.16.0.0/12 subsys=ipmasq level=info msg="Adding CIDR" cidr=169.254.0.0/16 subsys=ipmasq level=info msg="Adding CIDR" cidr=192.0.0.0/24 subsys=ipmasq level=info msg="Adding CIDR" cidr=192.0.2.0/24 subsys=ipmasq level=info msg="Launching Cilium health daemon" subsys=daemon level=info msg="Launching Cilium health endpoint" subsys=daemon level=info msg="Serving prometheus metrics on :9090" subsys=daemon level=info msg="Started healthz status API server" address="127.0.0.1:9876" subsys=daemon level=info msg="Initializing Cilium API" subsys=daemon level=info msg="Daemon initialization completed" bootstrapTime=5.492995488s subsys=daemon level=info msg="Serving cilium API at unix:///var/run/cilium/cilium.sock" subsys=daemon level=info msg="Configuring Hubble server" eventQueueSize=4096 maxFlows=4095 subsys=hubble level=info msg="Starting local Hubble server" address="unix:///var/run/cilium/hubble.sock" subsys=hubble level=info msg="Beginning to read perf buffer" startTime="2021-09-13 12:46:49.481933027 +0000 UTC m=+5.589215803" subsys=monitor-agent level=info msg="Starting Hubble server" address=":4244" subsys=hubble level=info msg="Processing API request with rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=a9a83b4d-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=a9a83b4d-1490-11ec-9d49-00163e18cc27 waitDurationTotal="21.701µs" level=info msg="Delete endpoint request" id="container-id:fcfbabfb2dbf58d20cc5131c2983baef398188e2760441988035e84c9cf7f92c" subsys=daemon level=info msg="API call has been processed" error="endpoint not found" name=endpoint-delete processingDuration="12.123µs" subsys=rate totalDuration="48.405µs" uuid=a9a83b4d-1490-11ec-9d49-00163e18cc27 waitDurationTotal="21.701µs" level=info msg="Processing API request with rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=a9ab4105-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate 
uuid=a9ab4105-1490-11ec-9d49-00163e18cc27 waitDurationTotal="62.827µs" level=info msg="Delete endpoint request" id="container-id:4cdc8ebfd70c415a1d7c427ef1047405fd40c3e58a1943d3977fb563eb2944d6" subsys=daemon level=info msg="API call has been processed" error="endpoint not found" name=endpoint-delete processingDuration="13.994µs" subsys=rate totalDuration="91.697µs" uuid=a9ab4105-1490-11ec-9d49-00163e18cc27 waitDurationTotal="62.827µs" level=info msg="Processing API request with rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=a9afeb36-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=a9afeb36-1490-11ec-9d49-00163e18cc27 waitDurationTotal="83.267µs" level=info msg="Delete endpoint request" id="container-id:33537e836a2abeabdd28ddd6878bad6f89bc4c59bc0f34893d9633ce2667adce" subsys=daemon level=info msg="API call has been processed" error="endpoint not found" name=endpoint-delete processingDuration="13.432µs" subsys=rate totalDuration="111.549µs" uuid=a9afeb36-1490-11ec-9d49-00163e18cc27 waitDurationTotal="83.267µs" level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate uuid=a9be89f4-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate uuid=a9be89f4-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Create endpoint request" addressing="&{172.20.4.208 a9bbc528-1490-11ec-9d49-00163e18cc27 }" containerID=c468ebbf57d2a9e77705a9c1c4f188b6669c6b4a057c17c083c0f52efc77d917 datapathConfiguration="&{false true false true 0xc00190989a}" interface=lxc24e5b8517683 k8sPodName=kube-system/hubble-relay-7995686985-nx74j labels="[]" subsys=daemon sync-build=true level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=166 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=166 identityLabels="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=hubble-relay,k8s:io.kubernetes.pod.namespace=kube-system,k8s:k8s-app=hubble-relay" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Reserved new local key" key="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=hubble-relay;k8s:io.kubernetes.pod.namespace=kube-system;k8s:k8s-app=hubble-relay;" subsys=allocator level=info msg="Reusing existing global key" key="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=hubble-relay;k8s:io.kubernetes.pod.namespace=kube-system;k8s:k8s-app=hubble-relay;" subsys=allocator level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=166 identity=39334 identityLabels="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=hubble-relay,k8s:io.kubernetes.pod.namespace=kube-system,k8s:k8s-app=hubble-relay" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info 
msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=166 identity=39334 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate uuid=a9bf77d3-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate uuid=a9bf77d3-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Create endpoint request" addressing="&{172.20.4.221 a9be6f4c-1490-11ec-9d49-00163e18cc27 }" containerID=535305ad6da3180dd4d464475fec7507e82cc8b1eb2a5495843512f210d6dd8b datapathConfiguration="&{false true false true 0xc001875a4a}" interface=lxca885da251c83 k8sPodName=pre/pre-eureka-0 labels="[]" subsys=daemon sync-build=true level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1022 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1022 identityLabels="k8s:app=pre-eureka,k8s:component=spring,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:part-of=pre,k8s:statefulset.kubernetes.io/pod-name=pre-eureka-0" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Reserved new local key" key="k8s:app=pre-eureka;k8s:component=spring;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:part-of=pre;k8s:statefulset.kubernetes.io/pod-name=pre-eureka-0;" subsys=allocator level=info msg="Reusing existing global key" key="k8s:app=pre-eureka;k8s:component=spring;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:part-of=pre;k8s:statefulset.kubernetes.io/pod-name=pre-eureka-0;" subsys=allocator level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1022 identity=8409 identityLabels="k8s:app=pre-eureka,k8s:component=spring,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:part-of=pre,k8s:statefulset.kubernetes.io/pod-name=pre-eureka-0" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1022 identity=8409 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate uuid=a9ca9e3e-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate 
uuid=a9ca9e3e-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Create endpoint request" addressing="&{172.20.4.64 a9c8d528-1490-11ec-9d49-00163e18cc27 }" containerID=e19fd7b6684ae83a1d78d053ae2a9fb0d1b4980ddcf7d9ef0a84c463d9fe274c datapathConfiguration="&{false true false true 0xc000ff160a}" interface=lxcefbb3e74bc15 k8sPodName=pre/pre-eureka-1 labels="[]" subsys=daemon sync-build=true level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=2805 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=2805 identityLabels="k8s:app=pre-eureka,k8s:component=spring,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:part-of=pre,k8s:statefulset.kubernetes.io/pod-name=pre-eureka-1" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Reserved new local key" key="k8s:app=pre-eureka;k8s:component=spring;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:part-of=pre;k8s:statefulset.kubernetes.io/pod-name=pre-eureka-1;" subsys=allocator level=info msg="Reusing existing global key" key="k8s:app=pre-eureka;k8s:component=spring;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:part-of=pre;k8s:statefulset.kubernetes.io/pod-name=pre-eureka-1;" subsys=allocator level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=2805 identity=3595 identityLabels="k8s:app=pre-eureka,k8s:component=spring,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:part-of=pre,k8s:statefulset.kubernetes.io/pod-name=pre-eureka-1" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=2805 identity=3595 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate uuid=a9d5bfbc-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 rateLimiterSkipped=true subsys=rate uuid=a9d5bfbc-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Create endpoint request" addressing="&{172.20.4.142 a9d4cb45-1490-11ec-9d49-00163e18cc27 }" containerID=066c57b642f25986c2e0c1d1e49d0338bde1f1883138e3525dda10071a250fba datapathConfiguration="&{false true false true 0xc001190f9a}" interface=lxc5383617eb851 k8sPodName=pre/pre-xl-job-8bdb7c55c-lkzbj labels="[]" subsys=daemon sync-build=true level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1777 ipv4= ipv6= 
k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1777 identityLabels="k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:job=pre-xl-job" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Reserved new local key" key="k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:job=pre-xl-job;" subsys=allocator level=info msg="Reusing existing global key" key="k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:job=pre-xl-job;" subsys=allocator level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1777 identity=53705 identityLabels="k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:job=pre-xl-job" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1777 identity=53705 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 subsys=rate uuid=a9db3c6f-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" burst=4 limit=0.50/s maxWaitDuration=15s maxWaitDurationLimiter=14.999924482s name=endpoint-create parallelRequests=4 subsys=rate uuid=a9db3c6f-1490-11ec-9d49-00163e18cc27 waitDurationLimiter=0s waitDurationTotal="86.161µs" level=info msg="Create endpoint request" addressing="&{172.20.4.37 a9da9f64-1490-11ec-9d49-00163e18cc27 }" containerID=db955e52c521057f3465d1f796fea54c5b5ff04809d9722992495d23fba7db60 datapathConfiguration="&{false true false true 0xc001191cc8}" interface=lxc4485873b7b1d k8sPodName=default/tomcat-85c798b5d5-74n2k labels="[]" subsys=daemon sync-build=true level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=822 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=822 identityLabels="k8s:app=tomcat,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=default" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Reserved new local key" key="k8s:app=tomcat;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=default;" subsys=allocator level=info msg="Reusing existing global key" 
key="k8s:app=tomcat;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=default;" subsys=allocator level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=822 identity=34367 identityLabels="k8s:app=tomcat,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=default" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=822 identity=34367 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 subsys=rate uuid=a9f57e09-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" burst=4 limit=0.50/s maxWaitDuration=15s maxWaitDurationLimiter=14.999919086s name=endpoint-create parallelRequests=4 subsys=rate uuid=a9f57e09-1490-11ec-9d49-00163e18cc27 waitDurationLimiter=0s waitDurationTotal="92.456µs" level=info msg="Create endpoint request" addressing="&{172.20.4.74 a9f4dd4e-1490-11ec-9d49-00163e18cc27 }" containerID=3c11bfe8b3803682e9f118d9b3730ddbc185d6493e41e1c97975393e643c2916 datapathConfiguration="&{false true false true 0xc0011e94d9}" interface=lxcda02873d8c8a k8sPodName=fleet-system/fleet-agent-6b5f8d9db7-r4n46 labels="[]" subsys=daemon sync-build=true level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1616 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1616 identityLabels="k8s:app=fleet-agent,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.namespace.labels.objectset.rio.cattle.io/hash=f399d0b310fbfb28e9667312fdc7a33954e2b8c8,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=fleet-agent,k8s:io.kubernetes.pod.namespace=fleet-system" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Reserved new local key" key="k8s:app=fleet-agent;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.namespace.labels.objectset.rio.cattle.io/hash=f399d0b310fbfb28e9667312fdc7a33954e2b8c8;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=fleet-agent;k8s:io.kubernetes.pod.namespace=fleet-system;" subsys=allocator level=info msg="Reusing existing global key" key="k8s:app=fleet-agent;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.namespace.labels.objectset.rio.cattle.io/hash=f399d0b310fbfb28e9667312fdc7a33954e2b8c8;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=fleet-agent;k8s:io.kubernetes.pod.namespace=fleet-system;" subsys=allocator level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1616 identity=58598 
identityLabels="k8s:app=fleet-agent,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.namespace.labels.objectset.rio.cattle.io/hash=f399d0b310fbfb28e9667312fdc7a33954e2b8c8,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=fleet-agent,k8s:io.kubernetes.pod.namespace=fleet-system" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1616 identity=58598 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=4 subsys=rate uuid=a9fdecdc-1490-11ec-9d49-00163e18cc27 level=info msg="API request released by rate limiter" burst=4 limit=0.50/s maxWaitDuration=15s maxWaitDurationLimiter=14.999932067s name=endpoint-create parallelRequests=4 subsys=rate uuid=a9fdecdc-1490-11ec-9d49-00163e18cc27 waitDurationLimiter=0s waitDurationTotal="77.404µs" level=info msg="Create endpoint request" addressing="&{172.20.4.109 a9fd56c4-1490-11ec-9d49-00163e18cc27 }" containerID=da4ce885211825ed3e7b964c044653b1beb52626414b1874ccc2af4796e259ad datapathConfiguration="&{false true false true 0xc001789e39}" interface=lxcdf4716dab9ef k8sPodName=kube-system/hubble-ui-769fb95577-gpdll labels="[]" subsys=daemon sync-build=true level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3706 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3706 identityLabels="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=hubble-ui,k8s:io.kubernetes.pod.namespace=kube-system,k8s:k8s-app=hubble-ui" ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Reserved new local key" key="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=hubble-ui;k8s:io.kubernetes.pod.namespace=kube-system;k8s:k8s-app=hubble-ui;" subsys=allocator level=info msg="Reusing existing global key" key="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=hubble-ui;k8s:io.kubernetes.pod.namespace=kube-system;k8s:k8s-app=hubble-ui;" subsys=allocator level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3706 identity=32233 identityLabels="k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-bg7fm,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=hubble-ui,k8s:io.kubernetes.pod.namespace=kube-system,k8s:k8s-app=hubble-ui" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3706 identity=32233 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=147 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=147 identityLabels="reserved:health" ipv4= ipv6= k8sPodName=/ 
subsys=endpoint level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=147 identity=4 identityLabels="reserved:health" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint level=info msg="Compiled new BPF template" BPFCompilationTime=1.462270158s file-path=/var/run/cilium/state/templates/5cb2b94c00a9378a4b880762ce4dafb11956a21e/bpf_lxc.o subsys=datapath-loader level=info msg="Compiled new BPF template" BPFCompilationTime=1.839581944s file-path=/var/run/cilium/state/templates/3bc1e0cb7434f9bf7d272b7b2647343cc556f1dc/bpf_host.o subsys=datapath-loader level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=2805 identity=3595 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=1022 identity=8409 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=1022 identity=8409 ipv4= ipv6= k8sPodName=/ subsys=daemon level=info msg="API call has been processed" name=endpoint-create processingDuration=2.220056422s subsys=rate totalDuration=2.220132185s uuid=a9bf77d3-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=166 identity=39334 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=2805 identity=3595 ipv4= ipv6= k8sPodName=/ subsys=daemon level=info msg="API call has been processed" name=endpoint-create processingDuration=2.147246096s subsys=rate totalDuration=2.14732675s uuid=a9ca9e3e-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=166 identity=39334 ipv4= ipv6= k8sPodName=/ subsys=daemon level=info msg="API call has been processed" name=endpoint-create processingDuration=2.226607292s subsys=rate totalDuration=2.22666038s uuid=a9be89f4-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=1777 identity=53705 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=1777 identity=53705 ipv4= ipv6= k8sPodName=/ subsys=daemon level=info msg="API call has been processed" name=endpoint-create processingDuration=2.887368587s subsys=rate totalDuration=2.887457051s uuid=a9d5bfbc-1490-11ec-9d49-00163e18cc27 waitDurationTotal=0s level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=1616 identity=58598 ipv4= ipv6= k8sPodName=/ subsys=endpoint level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=1616 identity=58598 ipv4= ipv6= k8sPodName=/ subsys=daemon level=info msg="API call has been processed" name=endpoint-create processingDuration=2.680986598s subsys=rate totalDuration=2.681100528s uuid=a9f57e09-1490-11ec-9d49-00163e18cc27 waitDurationTotal="92.456µs" level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=822 
identity=34367 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=822 identity=34367 ipv4= ipv6= k8sPodName=/ subsys=daemon
level=info msg="API call has been processed" name=endpoint-create processingDuration=2.863097435s subsys=rate totalDuration=2.863205821s uuid=a9db3c6f-1490-11ec-9d49-00163e18cc27 waitDurationTotal="86.161µs"
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.208 owned by kvstore or agent" k8sNamespace=kube-system k8sPodName=hubble-relay-7995686985-nx74j new-hostIP=172.20.4.208 new-podIP=172.20.4.208 new-podIPs="[{172.20.4.208}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=796 identity=1 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=3706 identity=32233 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=3706 identity=32233 ipv4= ipv6= k8sPodName=/ subsys=daemon
level=info msg="API call has been processed" name=endpoint-create processingDuration=3.458511669s subsys=rate totalDuration=3.458610029s uuid=a9fdecdc-1490-11ec-9d49-00163e18cc27 waitDurationTotal="77.404µs"
level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=147 identity=4 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.142 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-xl-job-8bdb7c55c-lkzbj new-hostIP=172.20.4.142 new-podIP=172.20.4.142 new-podIPs="[{172.20.4.142}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.74 owned by kvstore or agent" k8sNamespace=fleet-system k8sPodName=fleet-agent-6b5f8d9db7-r4n46 new-hostIP=172.20.4.74 new-podIP=172.20.4.74 new-podIPs="[{172.20.4.74}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.109 owned by kvstore or agent" k8sNamespace=kube-system k8sPodName=hubble-ui-769fb95577-gpdll new-hostIP=172.20.4.109 new-podIP=172.20.4.109 new-podIPs="[{172.20.4.109}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.221 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-eureka-0 new-hostIP=172.20.4.221 new-podIP=172.20.4.221 new-podIPs="[{172.20.4.221}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.64 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-eureka-1 new-hostIP=172.20.4.64 new-podIP=172.20.4.64 new-podIPs="[{172.20.4.64}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.37 owned by kvstore or agent" k8sNamespace=default k8sPodName=tomcat-85c798b5d5-74n2k new-hostIP=172.20.4.37 new-podIP=172.20.4.37 new-podIPs="[{172.20.4.37}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.208 owned by kvstore or agent" k8sNamespace=kube-system k8sPodName=hubble-relay-7995686985-nx74j new-hostIP=172.20.4.208 new-podIP=172.20.4.208 new-podIPs="[{172.20.4.208}]" old-hostIP=172.20.4.208 old-podIP=172.20.4.208 old-podIPs="[{172.20.4.208}]" subsys=k8s-watcher
level=info msg="Processing API request with rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=b1d8785a-1490-11ec-9d49-00163e18cc27
level=info msg="API request released by rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=b1d8785a-1490-11ec-9d49-00163e18cc27 waitDurationTotal="66.959µs"
level=info msg="Delete endpoint request" id="container-id:d1142408fb7c54b54946829a2b6a0d05a5e6bebc53e55cae58014012d79cfff6" subsys=daemon
level=info msg="API call has been processed" error="endpoint not found" name=endpoint-delete processingDuration="14.074µs" subsys=rate totalDuration="94.836µs" uuid=b1d8785a-1490-11ec-9d49-00163e18cc27 waitDurationTotal="66.959µs"
level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=3 subsys=rate uuid=b21ff288-1490-11ec-9d49-00163e18cc27
level=info msg="API request released by rate limiter" burst=4 limit=0.38/s maxWaitDuration=15s maxWaitDurationLimiter=14.99991856s name=endpoint-create parallelRequests=3 subsys=rate uuid=b21ff288-1490-11ec-9d49-00163e18cc27 waitDurationLimiter=0s waitDurationTotal="91.463µs"
level=info msg="Create endpoint request" addressing="&{172.20.4.193 b21d092e-1490-11ec-9d49-00163e18cc27 }" containerID=48dd77e1b5a418ff96e5377e80d9ee49dddc88e81b2e6759bfce214ae81bedfa datapathConfiguration="&{false true false true 0xc002309dc9}" interface=lxc8465f5f246ea k8sPodName=pre/pre-rabbitmq-1 labels="[]" subsys=daemon sync-build=true
level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=85 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=85 identityLabels="k8s:app=pre-rabbitmq,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=rabbitmq,k8s:io.kubernetes.pod.namespace=pre,k8s:statefulset.kubernetes.io/pod-name=pre-rabbitmq-1" ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Reserved new local key" key="k8s:app=pre-rabbitmq;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=rabbitmq;k8s:io.kubernetes.pod.namespace=pre;k8s:statefulset.kubernetes.io/pod-name=pre-rabbitmq-1;" subsys=allocator
level=info msg="Reusing existing global key" key="k8s:app=pre-rabbitmq;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=rabbitmq;k8s:io.kubernetes.pod.namespace=pre;k8s:statefulset.kubernetes.io/pod-name=pre-rabbitmq-1;" subsys=allocator
level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=85 identity=13980 identityLabels="k8s:app=pre-rabbitmq,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=rabbitmq,k8s:io.kubernetes.pod.namespace=pre,k8s:statefulset.kubernetes.io/pod-name=pre-rabbitmq-1" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint
level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=85 identity=13980 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=85 identity=13980 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=85 identity=13980 ipv4= ipv6= k8sPodName=/ subsys=daemon
level=info msg="API call has been processed" name=endpoint-create processingDuration=787.763497ms subsys=rate totalDuration=787.876121ms uuid=b21ff288-1490-11ec-9d49-00163e18cc27 waitDurationTotal="91.463µs"
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.193 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-rabbitmq-1 new-hostIP=172.20.4.193 new-podIP=172.20.4.193 new-podIPs="[{172.20.4.193}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=info msg="Serving cilium health API at unix:///var/run/cilium/health.sock" subsys=health-server
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.193 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-rabbitmq-1 new-hostIP=172.20.4.193 new-podIP=172.20.4.193 new-podIPs="[{172.20.4.193}]" old-hostIP=172.20.4.193 old-podIP=172.20.4.193 old-podIPs="[{172.20.4.193}]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.64 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-eureka-1 new-hostIP=172.20.4.64 new-podIP=172.20.4.64 new-podIPs="[{172.20.4.64}]" old-hostIP=172.20.4.64 old-podIP=172.20.4.64 old-podIPs="[{172.20.4.64}]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.221 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-eureka-0 new-hostIP=172.20.4.221 new-podIP=172.20.4.221 new-podIPs="[{172.20.4.221}]" old-hostIP=172.20.4.221 old-podIP=172.20.4.221 old-podIPs="[{172.20.4.221}]" subsys=k8s-watcher
level=info msg="Processing API request with rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=d49758dd-1490-11ec-9d49-00163e18cc27
level=info msg="API request released by rate limiter" name=endpoint-delete parallelRequests=4 subsys=rate uuid=d49758dd-1490-11ec-9d49-00163e18cc27 waitDurationTotal="53.688µs"
level=info msg="Delete endpoint request" id="container-id:a1efc71aa8b6aefcaa8e249334da001cc2bf7373a3cb86a75922ec92f4703e6c" subsys=daemon
level=info msg="API call has been processed" error="endpoint not found" name=endpoint-delete processingDuration="11.494µs" subsys=rate totalDuration="77.925µs" uuid=d49758dd-1490-11ec-9d49-00163e18cc27 waitDurationTotal="53.688µs"
level=info msg="Processing API request with rate limiter" maxWaitDuration=15s name=endpoint-create parallelRequests=3 subsys=rate uuid=d4b89c86-1490-11ec-9d49-00163e18cc27
level=info msg="API request released by rate limiter" burst=4 limit=0.42/s maxWaitDuration=15s maxWaitDurationLimiter=14.999937423s name=endpoint-create parallelRequests=3 subsys=rate uuid=d4b89c86-1490-11ec-9d49-00163e18cc27 waitDurationLimiter=0s waitDurationTotal="71.761µs"
level=info msg="Create endpoint request" addressing="&{172.20.4.11 d4b87660-1490-11ec-9d49-00163e18cc27 }" containerID=08eed0ae631818d1404c41f98cda673bea4c04877074362d43634486cd67218f datapathConfiguration="&{false true false true 0xc0011e9158}" interface=lxcba6acdc94a31 k8sPodName=pre/pre-zk-2 labels="[]" subsys=daemon sync-build=true
level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3928 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Resolving identity labels (blocking)" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3928 identityLabels="k8s:app=pre-zk,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:statefulset.kubernetes.io/pod-name=pre-zk-2" ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Reserved new local key" key="k8s:app=pre-zk;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:statefulset.kubernetes.io/pod-name=pre-zk-2;" subsys=allocator
level=info msg="Reusing existing global key" key="k8s:app=pre-zk;k8s:io.cilium.k8s.namespace.labels.env=pre;k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk;k8s:io.cilium.k8s.policy.cluster=default;k8s:io.cilium.k8s.policy.serviceaccount=default;k8s:io.kubernetes.pod.namespace=pre;k8s:statefulset.kubernetes.io/pod-name=pre-zk-2;" subsys=allocator
level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3928 identity=30123 identityLabels="k8s:app=pre-zk,k8s:io.cilium.k8s.namespace.labels.env=pre,k8s:io.cilium.k8s.namespace.labels.field.cattle.io/projectId=p-nt9tk,k8s:io.cilium.k8s.policy.cluster=default,k8s:io.cilium.k8s.policy.serviceaccount=default,k8s:io.kubernetes.pod.namespace=pre,k8s:statefulset.kubernetes.io/pod-name=pre-zk-2" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint
level=info msg="Waiting for endpoint to be generated" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3928 identity=30123 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Rewrote endpoint BPF program" containerID= datapathPolicyRevision=0 desiredPolicyRevision=2 endpointID=3928 identity=30123 ipv4= ipv6= k8sPodName=/ subsys=endpoint
level=info msg="Successful endpoint creation" containerID= datapathPolicyRevision=2 desiredPolicyRevision=2 endpointID=3928 identity=30123 ipv4= ipv6= k8sPodName=/ subsys=daemon
level=info msg="API call has been processed" name=endpoint-create processingDuration=406.422543ms subsys=rate totalDuration=406.512007ms uuid=d4b89c86-1490-11ec-9d49-00163e18cc27 waitDurationTotal="71.761µs"
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.11 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-zk-2 new-hostIP=172.20.4.11 new-podIP=172.20.4.11 new-podIPs="[{172.20.4.11}]" old-hostIP= old-podIP= old-podIPs="[]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.193 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-rabbitmq-1 new-hostIP=172.20.4.193 new-podIP=172.20.4.193 new-podIPs="[{172.20.4.193}]" old-hostIP=172.20.4.193 old-podIP=172.20.4.193 old-podIPs="[{172.20.4.193}]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.11 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-zk-2 new-hostIP=172.20.4.11 new-podIP=172.20.4.11 new-podIPs="[{172.20.4.11}]" old-hostIP=172.20.4.11 old-podIP=172.20.4.11 old-podIPs="[{172.20.4.11}]" subsys=k8s-watcher
level=warning msg="Unable to update ipcache map entry on pod add" error="ipcache entry for podIP 172.20.4.142 owned by kvstore or agent" k8sNamespace=pre k8sPodName=pre-xl-job-8bdb7c55c-lkzbj new-hostIP=172.20.4.142 new-podIP=172.20.4.142 new-podIPs="[{172.20.4.142}]" old-hostIP=172.20.4.142 old-podIP=172.20.4.142 old-podIPs="[{172.20.4.142}]" subsys=k8s-watcher
level=info msg="Conntrack garbage collector interval recalculated" deleteRatio=0.0031174515349835868 newInterval=7m30s subsys=map-ct
level=info msg="Conntrack garbage collector interval recalculated" deleteRatio=0.0030348700373681275 newInterval=11m15s subsys=map-ct
level=info msg="Conntrack garbage collector interval recalculated" deleteRatio=0.004875749255046073 newInterval=16m53s subsys=map-ct
level=info msg="Conntrack garbage collector interval recalculated" deleteRatio=0.007446098368327243 newInterval=25m20s subsys=map-ct
level=info msg="Conntrack garbage collector interval recalculated" deleteRatio=0.011155383969554955 newInterval=38m0s subsys=map-ct
- cilium daemon status
root@PRE-BE-K8S-WN2:/home/cilium# cilium status --verbose
KVStore:                Ok   Disabled
Kubernetes:             Ok   1.18 (v1.18.5) [linux/amd64]
Kubernetes APIs:        ["cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "core/v1::Namespace", "core/v1::Node", "core/v1::Pods", "core/v1::Service", "discovery/v1beta1::EndpointSlice", "networking.k8s.io/v1::NetworkPolicy"]
KubeProxyReplacement:   Strict   [eth0 (Direct Routing)]
Cilium:                 Ok   1.9.10 (v1.9.10-4e26039)
NodeMonitor:            Listening for events on 4 CPUs with 64x4096 of shared memory
Cilium health daemon:   Ok
IPAM:                   IPv4: 11/255 allocated from 172.20.4.0/24,
Allocated addresses:
  172.20.4.109 (kube-system/hubble-ui-769fb95577-gpdll)
  172.20.4.11 (pre/pre-zk-2)
  172.20.4.142 (pre/pre-xl-job-8bdb7c55c-lkzbj)
  172.20.4.166 (router)
  172.20.4.189 (health)
  172.20.4.193 (pre/pre-rabbitmq-1)
  172.20.4.208 (kube-system/hubble-relay-7995686985-nx74j)
  172.20.4.221 (pre/pre-eureka-0)
  172.20.4.37 (default/tomcat-85c798b5d5-74n2k)
  172.20.4.64 (pre/pre-eureka-1)
  172.20.4.74 (fleet-system/fleet-agent-6b5f8d9db7-r4n46)
BandwidthManager:       Disabled
Host Routing:           Legacy
Masquerading:           BPF (ip-masq-agent)   [eth0]   172.20.0.0/20
Clock Source for BPF:   ktime
Controller Status:      54/54 healthy
  Name                                   Last success    Last error   Count   Message
  cilium-health-ep                       9s ago          never        0       no error
  dns-garbage-collector-job              15s ago         never        0       no error
  endpoint-1022-regeneration-recovery    never           never        0       no error
  endpoint-147-regeneration-recovery     never           never        0       no error
  endpoint-1616-regeneration-recovery    never           never        0       no error
  endpoint-166-regeneration-recovery     never           never        0       no error
  endpoint-1777-regeneration-recovery    never           never        0       no error
  endpoint-2805-regeneration-recovery    never           never        0       no error
  endpoint-3706-regeneration-recovery    never           never        0       no error
  endpoint-3928-regeneration-recovery    never           never        0       no error
  endpoint-796-regeneration-recovery     never           never        0       no error
  endpoint-822-regeneration-recovery     never           never        0       no error
  endpoint-85-regeneration-recovery      never           never        0       no error
  k8s-heartbeat                          15s ago         never        0       no error
  mark-k8s-node-as-available             1h38m10s ago    never        0       no error
  metricsmap-bpf-prom-sync               5s ago          never        0       no error
  neighbor-table-refresh                 3m10s ago       never        0       no error
  resolve-identity-1022                  3m10s ago       never        0       no error
  resolve-identity-147                   3m9s ago        never        0       no error
  resolve-identity-1616                  3m10s ago       never        0       no error
  resolve-identity-166                   3m10s ago       never        0       no error
  resolve-identity-1777                  3m10s ago       never        0       no error
  resolve-identity-2805                  3m10s ago       never        0       no error
  resolve-identity-3706                  3m10s ago       never        0       no error
  resolve-identity-3928                  1m58s ago       never        0       no error
  resolve-identity-796                   3m10s ago       never        0       no error
  resolve-identity-822                   3m10s ago       never        0       no error
  resolve-identity-85                    2m56s ago       never        0       no error
  sync-endpoints-and-host-ips            10s ago         never        0       no error
  sync-lb-maps-with-k8s-services         1h38m10s ago    never        0       no error
  sync-policymap-1022                    8s ago          never        0       no error
  sync-policymap-147                     6s ago          never        0       no error
  sync-policymap-1616                    7s ago          never        0       no error
  sync-policymap-166                     8s ago          never        0       no error
  sync-policymap-1777                    7s ago          never        0       no error
  sync-policymap-2805                    8s ago          never        0       no error
  sync-policymap-3706                    6s ago          never        0       no error
  sync-policymap-3928                    57s ago         never        0       no error
  sync-policymap-796                     6s ago          never        0       no error
  sync-policymap-822                     7s ago          never        0       no error
  sync-policymap-85                      55s ago         never        0       no error
  sync-to-k8s-ciliumendpoint (1022)      10s ago         never        0       no error
  sync-to-k8s-ciliumendpoint (147)       9s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (1616)      9s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (166)       10s ago         never        0       no error
  sync-to-k8s-ciliumendpoint (1777)      9s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (2805)      9s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (3706)      9s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (3928)      8s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (796)       0s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (822)       9s ago          never        0       no error
  sync-to-k8s-ciliumendpoint (85)        6s ago          never        0       no error
  template-dir-watcher                   never           never        0       no error
  update-k8s-node-annotations            1h38m14s ago    never        0       no error
Proxy Status:           OK, ip 172.20.4.166, 0 redirects active on ports 10000-20000
Hubble:                 Ok   Current/Max Flows: 4096/4096 (100.00%), Flows/s: 18.85   Metrics: Disabled
KubeProxyReplacement Details:
  Status:                Strict
  Protocols:             TCP, UDP
  Devices:               eth0 (Direct Routing)
  Mode:                  Hybrid
  Backend Selection:     Random
  Session Affinity:      Enabled
  XDP Acceleration:      Disabled
  Services:
  - ClusterIP:      Enabled
  - NodePort:       Enabled (Range: 30000-32767)
  - LoadBalancer:   Enabled
  - externalIPs:    Enabled
  - HostPort:       Enabled
BPF Maps:   dynamic sizing: on (ratio: 0.002500)
  Name                          Size
  Non-TCP connection tracking   145311
  TCP connection tracking       290622
  Endpoint policy               65535
  Events                        4
  IP cache                      512000
  IP masquerading agent         16384
  IPv4 fragmentation            8192
  IPv4 service                  65536
  IPv6 service                  65536
  IPv4 service backend          65536
  IPv6 service backend          65536
  IPv4 service reverse NAT      65536
  IPv6 service reverse NAT      65536
  Metrics                       1024
  NAT                           290622
  Neighbor table                290622
  Global policy                 16384
  Per endpoint policy           65536
  Session affinity              65536
  Signal                        4
  Sockmap                       65535
  Sock reverse NAT              145311
  Tunnel                        65536
Cluster health:         7/7 reachable   (2021-09-13T14:24:21Z)
  Name                         IP            Node        Endpoints
  pre-be-k8s-wn2 (localhost)   10.1.20.78    reachable   reachable
  pre-be-k8s-wn1               10.1.20.77    reachable   reachable
  pre-be-k8s-wn3               10.1.20.79    reachable   reachable
  pre-k8s-cp1                  10.1.0.232    reachable   reachable
  pre-k8s-cp2                  10.1.0.233    reachable   reachable
  pre-k8s-cp3                  10.1.0.234    reachable   reachable
  pre-sys-k8s-wn1              10.1.20.100   reachable   reachable
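Two details in this status are worth noting. "Host Routing: Legacy" means the datapath fell back from BPF host routing even though the install requested it; the usual cause is a kernel below the 5.10 feature floor that BPF host routing needs, so this is expected on older hosts. And with endpointRoutes enabled, each local pod should be reachable through its own per-endpoint route on the node rather than via the cilium_host device. A minimal check, run on the node, assuming the interface names seen in the agent log above:

# Per-endpoint routes: one route per local pod, pointing at its lxc* veth
ip route show | grep lxc
# Expected form (values taken from this node's endpoint-create log entries):
#   172.20.4.193 dev lxc8465f5f246ea scope link
#   172.20.4.11 dev lxcba6acdc94a31 scope link
# Endpoint identities and service -> backend mappings from inside the agent
cilium endpoint list
cilium service list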