Build failed in Jenkins: cloud-antrea-gke-conformance-net-policy #732

antr...@gmail.com

Mar 25, 2024, 1:57:59 AM
to projecta...@googlegroups.com
See <https://jenkins.antrea-ci.rocks/job/cloud-antrea-gke-conformance-net-policy/732/display/redirect>

Changes:


------------------------------------------
[...truncated 1.23 MB...]
service: container.googleapis.com
bindings:
- members:
  - serviceAccount:service-74...@gcf-admin-robot.iam.gserviceaccount.com
  role: roles/cloudfunctions.serviceAgent
- members:
  - serviceAccount:service-74...@gcp-sa-cloudkms.iam.gserviceaccount.com
  role: roles/cloudkms.serviceAgent
- members:
  - serviceAccount:service-74...@compute-system.iam.gserviceaccount.com
  role: roles/compute.serviceAgent
- members:
  - principal://iam.googleapis.com/locations/global/workforcePools/cg-4fc781ac-3b61-4483-9572-60e7cbb5c2c0/subject/gavinx
  - serviceAccount:antre...@antrea.iam.gserviceaccount.com
  - user:anton...@broadcom.com
  - user:ding...@gcp.vmware.com
  - user:lan...@broadcom.com
  - user:yang...@broadcom.com
  role: roles/container.admin
- members:
  - principal://iam.googleapis.com/locations/global/workforcePools/cg-4fc781ac-3b61-4483-9572-60e7cbb5c2c0/subject/gavinx
  role: roles/container.clusterAdmin
- members:
  - serviceAccount:service-74...@gcp-sa-gkenode.iam.gserviceaccount.com
  role: roles/container.nodeServiceAgent
- members:
  - serviceAccount:service-74...@container-engine-robot.iam.gserviceaccount.com
  role: roles/container.serviceAgent
- members:
  - serviceAccount:service-74...@dataflow-service-producer-prod.iam.gserviceaccount.com
  role: roles/dataflow.serviceAgent
- members:
  - serviceAccount:service-74...@dataproc-accounts.iam.gserviceaccount.com
  role: roles/dataproc.serviceAgent
- members:
  - deleted:user:gat...@vmware.com?uid=517121711081662820640
  - principal://iam.googleapis.com/locations/global/workforcePools/cg-4fc781ac-3b61-4483-9572-60e7cbb5c2c0/subject/gavinx
  - principalSet://iam.googleapis.com/locations/global/workforcePools/cg-4fc781ac-3b61-4483-9572-60e7cbb5c2c0/attribute.role/PowerUser@746622851457
  - serviceAccount:7466228514...@developer.gserviceaccount.com
  - serviceAccount:746622...@cloudservices.gserviceaccount.com
  - serviceAccount:ant...@appspot.gserviceaccount.com
  - serviceAccount:service-74...@containerregistry.iam.gserviceaccount.com
  - user:6198...@qq.com
  - user:anton...@broadcom.com
  - user:lan...@broadcom.com
  - user:rahul...@broadcom.com
  - user:shuya...@broadcom.com
  - user:yang...@broadcom.com
  - user:zhengsh...@broadcom.com
  role: roles/editor
- members:
  - user:anton...@broadcom.com
  - user:rahul...@broadcom.com
  - user:shuya...@broadcom.com
  - user:yang...@broadcom.com
  - user:zhengsh...@broadcom.com
  role: roles/iam.roleAdmin
- members:
  - serviceAccount:service-74...@gcp-sa-pubsub.iam.gserviceaccount.com
  role: roles/iam.serviceAccountTokenCreator
- members:
  - serviceAccount:service-74...@gcp-sa-logging.iam.gserviceaccount.com
  role: roles/logging.serviceAgent
- members:
  - serviceAccount:service-74...@gcp-sa-networkconnectivity.iam.gserviceaccount.com
  role: roles/networkconnectivity.serviceAgent
- members:
  - principalSet://iam.googleapis.com/locations/global/workforcePools/cg-4fc781ac-3b61-4483-9572-60e7cbb5c2c0/attribute.role/Admin@746622851457
  - serviceAccount:antre...@antrea.iam.gserviceaccount.com
  - user:ab...@gcp.vmware.com
  - user:ding...@gcp.vmware.com
  - user:gat...@gcp.vmware.com
  - user:gav...@vmware.com
  - user:rah...@gcp.vmware.com
  - user:zheng...@gcp.vmware.com
  role: roles/owner
- members:
  - user:anton...@broadcom.com
  - user:rahul...@broadcom.com
  - user:shuya...@broadcom.com
  - user:yang...@broadcom.com
  - user:zhengsh...@broadcom.com
  role: roles/resourcemanager.projectIamAdmin
- members:
  - principalSet://iam.googleapis.com/locations/global/workforcePools/cg-4fc781ac-3b61-4483-9572-60e7cbb5c2c0/attribute.role/ReadOnly@746622851457
  - principalSet://iam.googleapis.com/locations/global/workforcePools/cg-4fc781ac-3b61-4483-9572-60e7cbb5c2c0/attribute.role/SecOps@746622851457
  role: roles/viewer
- members:
  - serviceAccount:service-74...@gcp-sa-websecurityscanner.iam.gserviceaccount.com
  role: roles/websecurityscanner.serviceAgent
etag: BwYUddCaiPg=
version: 1
+ kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user antre...@antrea.iam.gserviceaccount.com
clusterrolebinding.rbac.authorization.k8s.io/cluster-admin-binding created
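
(On GKE, the account applying the Antrea manifests must itself hold cluster-admin before it can create Antrea's ClusterRoles and ClusterRoleBindings; the binding above grants that to the CI service account. For a local reproduction, a minimal sketch assuming gcloud is already authenticated; the binding name is arbitrary:

    kubectl create clusterrolebinding cluster-admin-binding \
      --clusterrole cluster-admin \
      --user "$(gcloud config get-value account)"
)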
+ kubectl apply -f <https://jenkins.antrea-ci.rocks/job/cloud-antrea-gke-conformance-net-policy/ws/ci/../build/yamls/antrea-gke-node-init.yml>
daemonset.apps/antrea-node-init created
+ kubectl apply -f <https://jenkins.antrea-ci.rocks/job/cloud-antrea-gke-conformance-net-policy/ws/ci/../build/yamls/antrea-gke.yml>
customresourcedefinition.apiextensions.k8s.io/antreaagentinfos.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/antreacontrollerinfos.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/clustergroups.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/clusternetworkpolicies.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/egresses.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/externalentities.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/externalippools.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/externalnodes.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/groups.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/supportbundlecollections.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/tiers.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/traceflows.crd.antrea.io created
customresourcedefinition.apiextensions.k8s.io/trafficcontrols.crd.antrea.io created
serviceaccount/antrea-agent created
serviceaccount/antctl created
serviceaccount/antrea-controller created
secret/antrea-agent-service-account-token created
secret/antctl-service-account-token created
configmap/antrea-config created
clusterrole.rbac.authorization.k8s.io/antrea-agent created
clusterrole.rbac.authorization.k8s.io/antctl created
clusterrole.rbac.authorization.k8s.io/antrea-cluster-identity-reader created
clusterrole.rbac.authorization.k8s.io/antrea-controller created
clusterrole.rbac.authorization.k8s.io/aggregate-antrea-policies-edit created
clusterrole.rbac.authorization.k8s.io/aggregate-antrea-policies-view created
clusterrole.rbac.authorization.k8s.io/aggregate-traceflows-edit created
clusterrole.rbac.authorization.k8s.io/aggregate-traceflows-view created
clusterrole.rbac.authorization.k8s.io/aggregate-antrea-clustergroups-edit created
clusterrole.rbac.authorization.k8s.io/aggregate-antrea-clustergroups-view created
clusterrolebinding.rbac.authorization.k8s.io/antrea-agent created
clusterrolebinding.rbac.authorization.k8s.io/antctl created
clusterrolebinding.rbac.authorization.k8s.io/antrea-controller created
service/antrea created
daemonset.apps/antrea-agent created
deployment.apps/antrea-controller created
apiservice.apiregistration.k8s.io/v1beta2.controlplane.antrea.io created
apiservice.apiregistration.k8s.io/v1beta1.system.antrea.io created
apiservice.apiregistration.k8s.io/v1alpha1.stats.antrea.io created
mutatingwebhookconfiguration.admissionregistration.k8s.io/crdmutator.antrea.io created
validatingwebhookconfiguration.admissionregistration.k8s.io/crdvalidator.antrea.io created
+ kubectl rollout status --timeout=2m deployment.apps/antrea-controller -n kube-system
Waiting for deployment "antrea-controller" rollout to finish: 0 of 1 updated replicas are available...
deployment "antrea-controller" successfully rolled out
+ kubectl rollout status --timeout=2m daemonset/antrea-agent -n kube-system
Waiting for daemon set "antrea-agent" rollout to finish: 0 of 3 updated pods are available...
Waiting for daemon set "antrea-agent" rollout to finish: 1 of 3 updated pods are available...
Waiting for daemon set "antrea-agent" rollout to finish: 2 of 3 updated pods are available...
daemon set "antrea-agent" successfully rolled out
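
(The trace that follows restarts every pod that is not running on the host network, so that each one is re-created through the freshly installed Antrea CNI. A condensed sketch of the loop being traced, reconstructed from the '+' lines below; the script itself is not part of this log:

    for ns in $(kubectl get ns -o=jsonpath='{.items[*].metadata.name}' --no-headers=true); do
      # HOSTNETWORK prints <none> for pods that do not set spec.hostNetwork.
      pods=$(kubectl get pods -n "$ns" \
        -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork \
        --no-headers=true | grep '<none>' | awk '{ print $1 }')
      # Only issue a delete when the namespace has such pods.
      if [ -n "$pods" ]; then
        kubectl delete pods -n "$ns" $pods
      fi
    done
)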
++ kubectl get ns '-o=jsonpath={.items[*].metadata.name}' --no-headers=true
+ for ns in $(kubectl get ns -o=jsonpath=''{.items[*].metadata.name}'' --no-headers=true)
++ kubectl get pods -n default -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true
++ grep '<none>'
++ awk '{ print $1 }'
+ pods=
+ '[' -z '' ']'
+ for ns in $(kubectl get ns -o=jsonpath=''{.items[*].metadata.name}'' --no-headers=true)
++ kubectl get pods -n gke-managed-system -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true
++ grep '<none>'
++ awk '{ print $1 }'
+ pods=
+ '[' -z '' ']'
+ for ns in $(kubectl get ns -o=jsonpath=''{.items[*].metadata.name}'' --no-headers=true)
++ kubectl get pods -n gmp-public -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true
++ grep '<none>'
++ awk '{ print $1 }'
+ pods=
+ '[' -z '' ']'
+ for ns in $(kubectl get ns -o=jsonpath=''{.items[*].metadata.name}'' --no-headers=true)
++ kubectl get pods -n gmp-system -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true
++ grep '<none>'
++ awk '{ print $1 }'
+ pods='collector-n8csm
collector-tb29h
collector-xmpmv
gmp-operator-76c6bfcf5d-d8xgj
rule-evaluator-6f4bb4d4db-rmd6z'
+ '[' -z 'collector-n8csm
collector-tb29h
collector-xmpmv
gmp-operator-76c6bfcf5d-d8xgj
rule-evaluator-6f4bb4d4db-rmd6z' ']'
+ kubectl delete pods -n gmp-system collector-n8csm collector-tb29h collector-xmpmv gmp-operator-76c6bfcf5d-d8xgj rule-evaluator-6f4bb4d4db-rmd6z
pod "collector-n8csm" deleted
pod "collector-tb29h" deleted
pod "collector-xmpmv" deleted
pod "gmp-operator-76c6bfcf5d-d8xgj" deleted
pod "rule-evaluator-6f4bb4d4db-rmd6z" deleted
+ for ns in $(kubectl get ns -o=jsonpath=''{.items[*].metadata.name}'' --no-headers=true)
++ kubectl get pods -n kube-node-lease -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true
++ grep '<none>'
++ awk '{ print $1 }'
+ pods=
+ '[' -z '' ']'
+ for ns in $(kubectl get ns -o=jsonpath=''{.items[*].metadata.name}'' --no-headers=true)
++ grep '<none>'
++ kubectl get pods -n kube-public -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true
++ awk '{ print $1 }'
+ pods=
+ '[' -z '' ']'
+ for ns in $(kubectl get ns -o=jsonpath=''{.items[*].metadata.name}'' --no-headers=true)
++ grep '<none>'
++ kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true
++ awk '{ print $1 }'
+ pods='event-exporter-gke-8f5b7b66-rqd8m
konnectivity-agent-567448d85b-k54lh
konnectivity-agent-567448d85b-tnlrx
konnectivity-agent-567448d85b-vzn95
konnectivity-agent-autoscaler-6669f649bd-dnhf7
kube-dns-66496f9c59-rfl9k
kube-dns-66496f9c59-vxcf7
kube-dns-autoscaler-79b96f5cb-56njk
l7-default-backend-788657d69c-8vkbc
metrics-server-v0.7.0-7c57f474f4-nrcr2'
+ '[' -z 'event-exporter-gke-8f5b7b66-rqd8m
konnectivity-agent-567448d85b-k54lh
konnectivity-agent-567448d85b-tnlrx
konnectivity-agent-567448d85b-vzn95
konnectivity-agent-autoscaler-6669f649bd-dnhf7
kube-dns-66496f9c59-rfl9k
kube-dns-66496f9c59-vxcf7
kube-dns-autoscaler-79b96f5cb-56njk
l7-default-backend-788657d69c-8vkbc
metrics-server-v0.7.0-7c57f474f4-nrcr2' ']'
+ kubectl delete pods -n kube-system event-exporter-gke-8f5b7b66-rqd8m konnectivity-agent-567448d85b-k54lh konnectivity-agent-567448d85b-tnlrx konnectivity-agent-567448d85b-vzn95 konnectivity-agent-autoscaler-6669f649bd-dnhf7 kube-dns-66496f9c59-rfl9k kube-dns-66496f9c59-vxcf7 kube-dns-autoscaler-79b96f5cb-56njk l7-default-backend-788657d69c-8vkbc metrics-server-v0.7.0-7c57f474f4-nrcr2
pod "event-exporter-gke-8f5b7b66-rqd8m" deleted
pod "konnectivity-agent-567448d85b-k54lh" deleted
pod "konnectivity-agent-567448d85b-tnlrx" deleted
pod "konnectivity-agent-567448d85b-vzn95" deleted
pod "konnectivity-agent-autoscaler-6669f649bd-dnhf7" deleted
pod "kube-dns-66496f9c59-rfl9k" deleted
pod "kube-dns-66496f9c59-vxcf7" deleted
pod "kube-dns-autoscaler-79b96f5cb-56njk" deleted
pod "l7-default-backend-788657d69c-8vkbc" deleted
pod "metrics-server-v0.7.0-7c57f474f4-nrcr2" deleted
+ kubectl rollout status --timeout=2m deployment.apps/kube-dns -n kube-system
deployment "kube-dns" successfully rolled out
+ sleep 5
+ echo '=== Antrea has been deployed for GKE cluster antrea-gke-732 ==='
=== Antrea has been deployed for GKE cluster antrea-gke-732 ===
+ run_conformance
+ echo '=== Running Antrea Conformance and Network Policy Tests ==='
=== Running Antrea Conformance and Network Policy Tests ===
+ gcloud compute firewall-rules create allow-nodeport --allow tcp:30000-32767
Creating firewall...failed.
ERROR: (gcloud.compute.firewall-rules.create) Could not fetch resource:
- The resource 'projects/antrea/global/firewalls/allow-nodeport' already exists

Build step 'Execute shell' marked build as failure
Archiving artifacts
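
(The failure is in environment setup, not in the conformance tests themselves: the allow-nodeport firewall rule was left behind by a previous run, and gcloud compute firewall-rules create fails when the rule already exists. A tolerant version of the step might first check for the rule; a sketch, with the rule name and port range taken from the log above:

    # Create the NodePort firewall rule only if it does not already exist.
    gcloud compute firewall-rules describe allow-nodeport >/dev/null 2>&1 || \
      gcloud compute firewall-rules create allow-nodeport --allow tcp:30000-32767
)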
