2021/08/23 09:17:03 Starting execution
2021/08/23 09:17:04 [worker-eu-central-1a-020210823065201810300000009] outdated=2; updated=1; updatedAndReady=1; asgCurrent=3; asgDesired=3; asgMax=3
2021/08/23 09:17:04 [worker-eu-central-1a-020210823065201810300000009][i-xxxxxxxx] Node already started rollout process
2021/08/23 09:17:04 [worker-eu-central-1a-020210823065201810300000009][i-xxxxxxxx] Updated nodes have enough resources available
2021/08/23 09:17:04 [worker-eu-central-1a-020210823065201810300000009][i-xxxxxxxx] Draining node
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x20 pc=0x167787a]
goroutine 41 [running]:
golang.org/x/time/rate.(*Limiter).WaitN(0xc00007f180, 0x0, 0x0, 0x1, 0x0, 0x0)
/app/vendor/golang.org/x/time/rate/rate.go:237 +0xba
golang.org/x/time/rate.(*Limiter).Wait(...)
/app/vendor/golang.org/x/time/rate/rate.go:219
k8s.io/client-go/util/flowcontrol.(*tokenBucketRateLimiter).Wait(0xc0002c3d80, 0x0, 0x0, 0xc000644680, 0xc0009550d8)
/app/vendor/k8s.io/client-go/util/flowcontrol/throttle.go:106 +0x4b
k8s.io/client-go/rest.(*Request).tryThrottleWithInfo(0xc0007685a0, 0x0, 0x0, 0x0, 0x0, 0x42, 0x40)
/app/vendor/k8s.io/client-go/rest/request.go:587 +0xa5
k8s.io/client-go/rest.(*Request).tryThrottle(...)
/app/vendor/k8s.io/client-go/rest/request.go:613
k8s.io/client-go/rest.(*Request).request(0xc0007685a0, 0x0, 0x0, 0xc0009556c8, 0x0, 0x0)
/app/vendor/k8s.io/client-go/rest/request.go:873 +0x2fc
k8s.io/client-go/rest.(*Request).Do(0xc0007685a0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
/app/vendor/k8s.io/client-go/rest/request.go:980 +0xf1
k8s.io/client-go/kubernetes/typed/core/v1.(*nodes).Patch(0xc000722480, 0x0, 0x0, 0xc0008ca000, 0x2e, 0x1f67f8d, 0x26, 0xc000573460, 0x1f, 0x20, ...)
/app/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go:186 +0x237
k8s.io/kubectl/pkg/drain.(*CordonHelper).PatchOrReplaceWithContext(0xc000955ab0, 0x0, 0x0, 0x226c958, 0xc0002dab00, 0x1cf0100, 0x0, 0x0, 0x7fa2a30ae8f0, 0x10)
/app/vendor/k8s.io/kubectl/pkg/drain/cordon.go:102 +0x416
k8s.io/kubectl/pkg/drain.RunCordonOrUncordon(0xc00070e8f0, 0xc00077ef00, 0xc000722401, 0xc000504a80, 0x2e)
/app/vendor/k8s.io/kubectl/pkg/drain/default.go:60 +0xb3
github.com/TwinProduction/aws-eks-asg-rolling-update-handler/k8s.(*KubernetesClient).Drain(0xc0003e4760, 0xc000504a80, 0x2e, 0x101, 0x2, 0x1)
/app/k8s/client.go:125 +0x245
main.DoHandleRollingUpgrade(0x22537d8, 0xc0003e4760, 0x227b0e8, 0xc0003e4480, 0x2270ef8, 0xc0003e4490, 0xc0007001b0, 0x3, 0x3, 0x0)
/app/main.go:161 +0x14b4
main.HandleRollingUpgrade.func2(0xc0007df500, 0x22537d8, 0xc0003e4760, 0x227b0e8, 0xc0003e4480, 0x2270ef8, 0xc0003e4490, 0xc0007001b0, 0x3, 0x3)
/app/main.go:96 +0x94
created by main.HandleRollingUpgrade
/app/main.go:95 +0x12e
image:
  tag: "latest"
environmentVars:
  - name: CLUSTER_NAME
    value: "cluster_name"
  - name: AWS_REGION
    value: "eu-central-1"
serviceAccount:
  annotations:
    eks.amazonaws.com/role-arn: "arn:aws:iam::111111:role/RollingUpdate"