From 6100c61f33eb289dd4a5d3cd4f55279134b0fbba Mon Sep 17 00:00:00 2001 From: savitaashture Date: Wed, 17 Feb 2021 00:24:37 +0530 Subject: [PATCH] Add support for custom object to triggers eventlistener --- config/200-clusterrole.yaml | 4 +- config/300-eventlistener.yaml | 8 +- examples/custom-resource/README.md | 47 + ...ntlistener-interceptor-customresource.yaml | 88 ++ examples/custom-resource/rbac.yaml | 65 ++ examples/custom-resource/secret.yaml | 7 + go.mod | 1 + go.sum | 13 + .../triggers/v1alpha1/event_listener_types.go | 22 +- .../v1alpha1/event_listener_types_test.go | 12 + .../v1alpha1/event_listener_validation.go | 39 +- .../event_listener_validation_test.go | 131 +++ .../v1alpha1/zz_generated.deepcopy.go | 22 + .../v1alpha1/eventlistener/controller.go | 7 + .../v1alpha1/eventlistener/eventlistener.go | 473 +++++++++- .../eventlistener/eventlistener_test.go | 866 +++++++++++------- pkg/resources/create.go | 2 +- test/controller.go | 73 +- test/controller_test.go | 24 + test/wait.go | 11 +- .../ducks/duck/v1/podspecable/fake/fake.go | 30 + .../ducks/duck/v1/podspecable/podspecable.go | 60 ++ .../clients/dynamicclient/dynamicclient.go | 49 + .../clients/dynamicclient/fake/fake.go | 53 ++ vendor/modules.txt | 5 + 25 files changed, 1741 insertions(+), 371 deletions(-) create mode 100644 examples/custom-resource/README.md create mode 100644 examples/custom-resource/github-eventlistener-interceptor-customresource.yaml create mode 100644 examples/custom-resource/rbac.yaml create mode 100644 examples/custom-resource/secret.yaml create mode 100644 vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/fake/fake.go create mode 100644 vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/podspecable.go create mode 100644 vendor/knative.dev/pkg/injection/clients/dynamicclient/dynamicclient.go create mode 100644 vendor/knative.dev/pkg/injection/clients/dynamicclient/fake/fake.go diff --git a/config/200-clusterrole.yaml b/config/200-clusterrole.yaml index 8139973836..10dfcdd89d 100644 --- a/config/200-clusterrole.yaml +++ b/config/200-clusterrole.yaml @@ -39,7 +39,9 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] - + - apiGroups: ["serving.knative.dev"] + resources: ["*", "*/status", "*/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "deletecollection", "patch", "watch"] --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 diff --git a/config/300-eventlistener.yaml b/config/300-eventlistener.yaml index 969b98c2f1..9956de0606 100644 --- a/config/300-eventlistener.yaml +++ b/config/300-eventlistener.yaml @@ -61,4 +61,10 @@ spec: jsonPath: ".status.conditions[?(@.type=='Available')].status" - name: Reason type: string - jsonPath: ".status.conditions[?(@.type=='Available')].reason" \ No newline at end of file + jsonPath: ".status.conditions[?(@.type=='Available')].reason" + - name: Ready + type: string + jsonPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/examples/custom-resource/README.md b/examples/custom-resource/README.md new file mode 100644 index 0000000000..2bda5dd476 --- /dev/null +++ b/examples/custom-resource/README.md @@ -0,0 +1,47 @@ +## GitHub EventListener + +Creates an EventListener that listens for GitHub webhook events. + +### Try it out locally: + +1. 
To create the GitHub trigger and all related resources, run:

   ```bash
   kubectl apply -f examples/custom-resource/
   ```

1. Port forward:

   ```bash
   kubectl port-forward \
    "$(kubectl get pod --selector=eventlistener=github-listener-interceptor-customresource -oname)" \
     8080
   ```

   **Note**: Instead of port forwarding, you can set the
   [`serviceType`](https://github.com/tektoncd/triggers/blob/master/docs/eventlisteners.md#serviceType)
   to `LoadBalancer` to expose the EventListener with a public IP.

1. Test by sending the sample payload:

   ```bash
   curl -v \
   -H 'X-GitHub-Event: pull_request' \
   -H 'X-Hub-Signature: sha1=ba0cdc263b3492a74b601d240c27efe81c4720cb' \
   -H 'Content-Type: application/json' \
   -d '{"action": "opened", "pull_request":{"head":{"sha": "28911bbb5a3e2ea034daf1f6be0a822d50e31e73"}},"repository":{"clone_url": "https://github.com/tektoncd/triggers.git"}}' \
   http://localhost:8080
   ```

   The response status code should be `201 Created`.

   The [`HMAC`](https://www.freeformatter.com/hmac-generator.html) tool was used to generate the `X-Hub-Signature` value above.

   In the [`HMAC`](https://www.freeformatter.com/hmac-generator.html) tool, `string` is the body payload, for example `{"action": "opened", "pull_request":{"head":{"sha": "28911bbb5a3e2ea034daf1f6be0a822d50e31e73"}},"repository":{"clone_url": "https://github.com/tektoncd/triggers.git"}}`,
   and `secretKey` is the configured secretToken, for example `1234567`.

1. Verify that a new TaskRun was created:

   ```bash
   kubectl get taskruns | grep github-run-
   ```
diff --git a/examples/custom-resource/github-eventlistener-interceptor-customresource.yaml b/examples/custom-resource/github-eventlistener-interceptor-customresource.yaml
new file mode 100644
index 0000000000..a3d101da94
--- /dev/null
+++ b/examples/custom-resource/github-eventlistener-interceptor-customresource.yaml
@@ -0,0 +1,88 @@
+---
+apiVersion: triggers.tekton.dev/v1alpha1
+kind: EventListener
+metadata:
+  name: github-listener-interceptor-customresource
+spec:
+  triggers:
+    - name: github-listener
+      interceptors:
+        - github:
+            secretRef:
+              secretName: github-secret
+              secretKey: secretToken
+            eventTypes:
+              - pull_request
+        - cel:
+            filter: "body.action in ['opened', 'synchronize', 'reopened']"
+      bindings:
+        - ref: github-pr-binding
+      template:
+        ref: github-template
+  resources:
+    customResource:
+      apiVersion: serving.knative.dev/v1
+      kind: Service
+      metadata:
+        name: knativeservice
+      spec:
+        template:
+          metadata:
+            name: knativeservice-rev
+          spec:
+            serviceAccountName: tekton-triggers-example-sa
+            containers:
+              - resources:
+                  requests:
+                    memory: "64Mi"
+                    cpu: "250m"
+                  limits:
+                    memory: "128Mi"
+                    cpu: "500m"
+---
+apiVersion: triggers.tekton.dev/v1alpha1
+kind: TriggerBinding
+metadata:
+  name: github-pr-binding
+spec:
+  params:
+    - name: gitrevision
+      value: $(body.pull_request.head.sha)
+    - name: gitrepositoryurl
+      value: $(body.repository.clone_url)
+
+---
+apiVersion: triggers.tekton.dev/v1alpha1
+kind: TriggerTemplate
+metadata:
+  name: github-template
+spec:
+  params:
+    - name: gitrevision
+    - name: gitrepositoryurl
+  resourcetemplates:
+    - apiVersion: tekton.dev/v1alpha1
+      kind: TaskRun
+      metadata:
+        generateName: github-run-
+      spec:
+        taskSpec:
+          inputs:
+            resources:
+              - name: source
+                type: git
+          steps:
+            - image: ubuntu
+              script: |
+                #!
/bin/bash + ls -al $(inputs.resources.source.path) + inputs: + resources: + - name: source + resourceSpec: + type: git + params: + - name: revision + value: $(tt.params.gitrevision) + - name: url + value: $(tt.params.gitrepositoryurl) diff --git a/examples/custom-resource/rbac.yaml b/examples/custom-resource/rbac.yaml new file mode 100644 index 0000000000..e37449e8ad --- /dev/null +++ b/examples/custom-resource/rbac.yaml @@ -0,0 +1,65 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-triggers-example-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tekton-triggers-example-minimal +rules: +# EventListeners need to be able to fetch all namespaced resources +- apiGroups: ["triggers.tekton.dev"] + resources: ["eventlisteners", "triggerbindings", "triggertemplates", "triggers"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] +# secrets are only needed for GitHub/GitLab interceptors +# configmaps is needed for updating logging config + resources: ["configmaps", "secrets"] + verbs: ["get", "list", "watch"] +# Permissions to create resources in associated TriggerTemplates +- apiGroups: ["tekton.dev"] + resources: ["pipelineruns", "pipelineresources", "taskruns"] + verbs: ["create"] +- apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["impersonate"] +- apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + resourceNames: ["tekton-triggers"] + verbs: ["use"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tekton-triggers-example-binding +subjects: +- kind: ServiceAccount + name: tekton-triggers-example-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tekton-triggers-example-minimal +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-triggers-example-clusterrole +rules: +# EventListeners need to be able to fetch any clustertriggerbindings +- apiGroups: ["triggers.tekton.dev"] + resources: ["clustertriggerbindings"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-triggers-example-clusterbinding +subjects: +- kind: ServiceAccount + name: tekton-triggers-example-sa + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-triggers-example-clusterrole diff --git a/examples/custom-resource/secret.yaml b/examples/custom-resource/secret.yaml new file mode 100644 index 0000000000..beb4f9c894 --- /dev/null +++ b/examples/custom-resource/secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: github-secret +type: Opaque +stringData: + secretToken: "1234567" diff --git a/go.mod b/go.mod index 2d03c83899..e895252c39 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/google/go-github/v31 v31.0.0 github.com/google/uuid v1.2.0 github.com/gorilla/mux v1.7.4 + github.com/sirupsen/logrus v1.7.0 github.com/spf13/cobra v1.0.0 github.com/tektoncd/pipeline v0.20.1-0.20210203144343-1b7a37f0d21d github.com/tektoncd/plumbing v0.0.0-20201021153918-6b7e894737b5 diff --git a/go.sum b/go.sum index 50650d249b..0018b15fec 100644 --- a/go.sum +++ b/go.sum @@ -246,6 +246,7 @@ github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= 
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -362,6 +363,7 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -437,6 +439,7 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/licenseclassifier v0.0.0-20190926221455-842c0d70d702 h1:nVgx26pAe6l/02mYomOuZssv28XkacGw/0WeiTVorqw= github.com/google/licenseclassifier v0.0.0-20190926221455-842c0d70d702/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3/go.mod h1:YzLcVlL+NqWnmUEPuhS1LxDDwGO9WNbVlEXaF4IH35g= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= @@ -535,6 +538,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/tdigest v0.0.0-20180711151920-a7d76c6f093a/go.mod h1:9GkyshztGufsdPQWjH+ifgnIr3xNUL5syI70g2dzU1o= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= @@ -568,6 +572,7 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod 
h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -618,6 +623,7 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -691,6 +697,7 @@ github.com/openzipkin/zipkin-go v0.2.2 h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= @@ -775,6 +782,7 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= @@ -807,6 +815,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp 
v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -848,6 +857,7 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/vdemeester/k8s-pkg-credentialprovider v1.19.7/go.mod h1:K2nMO14cgZitdwBqdQps9tInJgcaXcU/7q5F59lpbNI= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= @@ -1357,12 +1367,15 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLv gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/apis/triggers/v1alpha1/event_listener_types.go b/pkg/apis/triggers/v1alpha1/event_listener_types.go index 26ea2fbf48..3d7e65cb88 100644 --- a/pkg/apis/triggers/v1alpha1/event_listener_types.go +++ b/pkg/apis/triggers/v1alpha1/event_listener_types.go @@ -22,10 +22,12 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + "knative.dev/pkg/apis/duck/v1beta1" ) // Check that EventListener may be validated and defaulted. @@ -66,6 +68,11 @@ type EventListenerSpec struct { type Resources struct { KubernetesResource *KubernetesResource `json:"kubernetesResource,omitempty"` + CustomResource *CustomResource `json:"customResource,omitempty"` +} + +type CustomResource struct { + runtime.RawExtension `json:",inline"` } type KubernetesResource struct { @@ -169,7 +176,8 @@ const ( ServiceExists apis.ConditionType = "Service" // DeploymentExists is the ConditionType set on the EventListener, which // specifies Deployment existence. 
- DeploymentExists apis.ConditionType = "Deployment" + DeploymentExists apis.ConditionType = "Deployment" + KnativeServiceExists apis.ConditionType = "Knative Service" ) // Check that EventListener may be validated and defaulted. @@ -224,6 +232,17 @@ func (els *EventListenerStatus) SetDeploymentConditions(deploymentConditions []a } } +func (els *EventListenerStatus) SetConditionsForDynamicObjects(conditions v1beta1.Conditions) { + for _, cond := range conditions { + els.SetCondition(&apis.Condition{ + Type: cond.Type, + Status: cond.Status, + Reason: cond.Reason, + Message: cond.Message, + }) + } +} + // SetExistsCondition simplifies setting the exists conditions on the // EventListenerStatus. func (els *EventListenerStatus) SetExistsCondition(cond apis.ConditionType, err error) { @@ -250,6 +269,7 @@ func (els *EventListenerStatus) InitializeConditions() { for _, condition := range []apis.ConditionType{ ServiceExists, DeploymentExists, + KnativeServiceExists, } { els.SetCondition(&apis.Condition{ Type: condition, diff --git a/pkg/apis/triggers/v1alpha1/event_listener_types_test.go b/pkg/apis/triggers/v1alpha1/event_listener_types_test.go index 9a0ec8925c..aa956dab73 100644 --- a/pkg/apis/triggers/v1alpha1/event_listener_types_test.go +++ b/pkg/apis/triggers/v1alpha1/event_listener_types_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/apis/duck/v1beta1" ) func TestSetGetCondition(t *testing.T) { @@ -100,6 +101,7 @@ func TestInitializeConditions(t *testing.T) { var conditionTypes = []apis.ConditionType{ ServiceExists, DeploymentExists, + KnativeServiceExists, } els := &EventListenerStatus{} els.InitializeConditions() @@ -272,3 +274,13 @@ func TestSetDeploymentConditions(t *testing.T) { }) } } + +func TestSetConditionsForDynamicObjects(t *testing.T) { + var status EventListenerStatus + status.SetConditionsForDynamicObjects(v1beta1.Conditions{{ + Type: KnativeServiceExists, + Status: corev1.ConditionTrue, + Reason: "Reason", + Message: "Message", + }}) +} diff --git a/pkg/apis/triggers/v1alpha1/event_listener_validation.go b/pkg/apis/triggers/v1alpha1/event_listener_validation.go index 4c6c938ba0..e3c5ef7e77 100644 --- a/pkg/apis/triggers/v1alpha1/event_listener_validation.go +++ b/pkg/apis/triggers/v1alpha1/event_listener_validation.go @@ -17,13 +17,16 @@ limitations under the License. 
package v1alpha1 import ( + "bytes" "context" + "encoding/json" "fmt" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" ) var ( @@ -57,9 +60,43 @@ func (s *EventListenerSpec) validate(ctx context.Context) (errs *apis.FieldError for i, trigger := range s.Triggers { errs = errs.Also(trigger.validate(ctx).ViaField(fmt.Sprintf("spec.triggers[%d]", i))) } + // Both Kubernetes and Custom resource can't be present at the same time + if s.Resources.KubernetesResource != nil && s.Resources.CustomResource != nil { + return apis.ErrMultipleOneOf("custom resource", "kubernetes resource") + } + if s.Resources.KubernetesResource != nil { errs = errs.Also(validateKubernetesObject(s.Resources.KubernetesResource).ViaField("spec.resources.kubernetesResource")) } + + if s.Resources.CustomResource != nil { + errs = errs.Also(validateCustomObject(s.Resources.CustomResource).ViaField("spec.resources.customResource")) + } + return errs +} + +func validateCustomObject(customData *CustomResource) (errs *apis.FieldError) { + orig := duckv1.WithPod{} + decoder := json.NewDecoder(bytes.NewBuffer(customData.RawExtension.Raw)) + + if err := decoder.Decode(&orig); err != nil { + errs = errs.Also(apis.ErrInvalidValue(err, "spec")) + } + + if len(orig.Spec.Template.Spec.Containers) > 1 { + errs = errs.Also(apis.ErrMultipleOneOf("containers").ViaField("spec.template.spec")) + } + errs = errs.Also(apis.CheckDisallowedFields(orig.Spec.Template.Spec, + *podSpecMask(&orig.Spec.Template.Spec)).ViaField("spec.template.spec")) + + // bounded by condition because containers fields are optional so there is a chance that containers can be nil. + if len(orig.Spec.Template.Spec.Containers) == 1 { + errs = errs.Also(apis.CheckDisallowedFields(orig.Spec.Template.Spec.Containers[0], + *containerFieldMask(&orig.Spec.Template.Spec.Containers[0])).ViaField("spec.template.spec.containers[0]")) + // validate env + errs = errs.Also(validateEnv(orig.Spec.Template.Spec.Containers[0].Env).ViaField("spec.template.spec.containers[0].env")) + } + return errs } @@ -168,7 +205,6 @@ func containerFieldMask(in *corev1.Container) *corev1.Container { out.VolumeMounts = nil out.ImagePullPolicy = "" out.Lifecycle = nil - out.SecurityContext = nil out.Stdin = false out.StdinOnce = false out.TerminationMessagePath = "" @@ -195,7 +231,6 @@ func podSpecMask(in *corev1.PodSpec) *corev1.PodSpec { // Disallowed fields // This list clarifies which all podspec fields are not allowed. 
out.Volumes = nil - out.ImagePullSecrets = nil out.EnableServiceLinks = nil out.ImagePullSecrets = nil out.InitContainers = nil diff --git a/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go b/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go index 563a8041ed..87cb3983cb 100644 --- a/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go +++ b/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go @@ -21,10 +21,12 @@ import ( "testing" "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" + "github.com/tektoncd/triggers/test" bldr "github.com/tektoncd/triggers/test/builder" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" duckv1 "knative.dev/pkg/apis/duck/v1" "knative.dev/pkg/ptr" ) @@ -212,6 +214,29 @@ func Test_EventListenerValidate(t *testing.T) { }), )), )), + }, { + name: "Valid EventListener with custom resources", + el: &v1alpha1.EventListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.EventListenerSpec{ + Triggers: []v1alpha1.EventListenerTrigger{{ + Bindings: []*v1alpha1.EventListenerBinding{{ + Ref: "tb", + Kind: "TriggerBinding", + APIVersion: "v1alpha1", + }}, + TriggerRef: "triggerref", + }}, + Resources: v1alpha1.Resources{ + CustomResource: &v1alpha1.CustomResource{ + RawExtension: getValidRawData(t), + }, + }, + }, + }, }} for _, test := range tests { @@ -575,6 +600,53 @@ func TestEventListenerValidate_error(t *testing.T) { }), )), )), + }, { + name: "user specify both kubernetes and custom resources", + el: &v1alpha1.EventListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.EventListenerSpec{ + Triggers: []v1alpha1.EventListenerTrigger{{ + Bindings: []*v1alpha1.EventListenerBinding{{ + Ref: "tb", + Kind: "TriggerBinding", + APIVersion: "v1alpha1", + }}, + }}, + Resources: v1alpha1.Resources{ + KubernetesResource: &v1alpha1.KubernetesResource{ + ServiceType: "NodePort", + }, + CustomResource: &v1alpha1.CustomResource{ + RawExtension: runtime.RawExtension{Raw: []byte(`{"rt1": "value"}`)}, + }, + }, + }, + }, + }, { + name: "user specify multiple containers, unsupported podspec and container field in custom resources", + el: &v1alpha1.EventListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: v1alpha1.EventListenerSpec{ + Triggers: []v1alpha1.EventListenerTrigger{{ + Bindings: []*v1alpha1.EventListenerBinding{{ + Ref: "tb", + Kind: "TriggerBinding", + APIVersion: "v1alpha1", + }}, + }}, + Resources: v1alpha1.Resources{ + CustomResource: &v1alpha1.CustomResource{ + RawExtension: getRawData(t), + }, + }, + }, + }, }} for _, test := range tests { @@ -585,3 +657,62 @@ func TestEventListenerValidate_error(t *testing.T) { }) } } + +func getRawData(t *testing.T) runtime.RawExtension { + return test.RawExtension(t, duckv1.WithPod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "serving.knative.dev/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "knativeservice", + }, + Spec: duckv1.WithPodSpec{Template: duckv1.PodSpecable{ + Spec: corev1.PodSpec{ + ServiceAccountName: "tekton-triggers-example-sa", + NodeName: "minikube", + Containers: []corev1.Container{{ + Name: "first-container", + }, { + Env: []corev1.EnvVar{{ + Name: "key", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: 
"test"}, + Key: "a.crt", + }, + }, + }}, + }}, + }, + }}, + }) +} + +func getValidRawData(t *testing.T) runtime.RawExtension { + return test.RawExtension(t, duckv1.WithPod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "serving.knative.dev/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "knativeservice", + }, + Spec: duckv1.WithPodSpec{Template: duckv1.PodSpecable{ + Spec: corev1.PodSpec{ + ServiceAccountName: "tekton-triggers-example-sa", + Containers: []corev1.Container{{ + Env: []corev1.EnvVar{{ + Name: "key", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "test"}, + Key: "a.crt", + }, + }, + }}, + }}, + }, + }}, + }) +} diff --git a/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go index 19b387fff4..6fae22063a 100644 --- a/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go @@ -150,6 +150,23 @@ func (in *ClusterTriggerBindingList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResource) DeepCopyInto(out *CustomResource) { + *out = *in + in.RawExtension.DeepCopyInto(&out.RawExtension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResource. +func (in *CustomResource) DeepCopy() *CustomResource { + if in == nil { + return nil + } + out := new(CustomResource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EventListener) DeepCopyInto(out *EventListener) { *out = *in @@ -485,6 +502,11 @@ func (in *Resources) DeepCopyInto(out *Resources) { *out = new(KubernetesResource) (*in).DeepCopyInto(*out) } + if in.CustomResource != nil { + in, out := &in.CustomResource, &out.CustomResource + *out = new(CustomResource) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/reconciler/v1alpha1/eventlistener/controller.go b/pkg/reconciler/v1alpha1/eventlistener/controller.go index 1e4d627714..00088a329b 100644 --- a/pkg/reconciler/v1alpha1/eventlistener/controller.go +++ b/pkg/reconciler/v1alpha1/eventlistener/controller.go @@ -25,12 +25,14 @@ import ( eventlistenerreconciler "github.com/tektoncd/triggers/pkg/client/injection/reconciler/triggers/v1alpha1/eventlistener" "k8s.io/client-go/tools/cache" + duckinformer "knative.dev/pkg/client/injection/ducks/duck/v1/podspecable" kubeclient "knative.dev/pkg/client/injection/kube/client" deployinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" configmapinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap" serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" "knative.dev/pkg/configmap" "knative.dev/pkg/controller" + "knative.dev/pkg/injection/clients/dynamicclient" "knative.dev/pkg/logging" ) @@ -38,6 +40,7 @@ import ( func NewController(config Config) func(context.Context, configmap.Watcher) *controller.Impl { return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl { logger := logging.FromContext(ctx) + dynamicclientset := dynamicclient.Get(ctx) kubeclientset := kubeclient.Get(ctx) triggersclientset := triggersclient.Get(ctx) eventListenerInformer := eventlistenerinformer.Get(ctx) @@ -45,6 +48,7 @@ func NewController(config Config) 
func(context.Context, configmap.Watcher) *cont serviceInformer := serviceinformer.Get(ctx) reconciler := &Reconciler{ + DynamicClientSet: dynamicclientset, KubeClientSet: kubeclientset, TriggersClientSet: triggersclientset, configmapLister: configmapinformer.Get(ctx).Lister(), @@ -62,6 +66,9 @@ func NewController(config Config) func(context.Context, configmap.Watcher) *cont }) logger.Info("Setting up event handlers") + + reconciler.podspecableTracker = NewListableTracker(ctx, duckinformer.Get, impl) + eventListenerInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: impl.Enqueue, UpdateFunc: controller.PassNew(impl.Enqueue), diff --git a/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go b/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go index daa25a4e7a..219332faf5 100644 --- a/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go +++ b/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go @@ -17,14 +17,21 @@ limitations under the License. package eventlistener import ( + "bytes" "context" + "encoding/json" "fmt" "reflect" "strconv" + "strings" + "sync" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + logger "github.com/sirupsen/logrus" "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" + triggersclientset "github.com/tektoncd/triggers/pkg/client/clientset/versioned" + eventlistenerreconciler "github.com/tektoncd/triggers/pkg/client/injection/reconciler/triggers/v1alpha1/eventlistener" listers "github.com/tektoncd/triggers/pkg/client/listers/triggers/v1alpha1" "github.com/tektoncd/triggers/pkg/system" "go.uber.org/zap" @@ -32,18 +39,24 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" appsv1lister "k8s.io/client-go/listers/apps/v1" corev1lister "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/apis/duck/v1beta1" + "knative.dev/pkg/controller" "knative.dev/pkg/logging" - pkgreconciler "knative.dev/pkg/reconciler" - - triggersclientset "github.com/tektoncd/triggers/pkg/client/clientset/versioned" - eventlistenerreconciler "github.com/tektoncd/triggers/pkg/client/injection/reconciler/triggers/v1alpha1/eventlistener" "knative.dev/pkg/ptr" + pkgreconciler "knative.dev/pkg/reconciler" ) const ( @@ -66,6 +79,7 @@ const ( // Reconciler implements controller.Reconciler for Configuration resources. type Reconciler struct { + DynamicClientSet dynamic.Interface // KubeClientSet allows us to talk to the k8s for core APIs KubeClientSet kubernetes.Interface @@ -80,7 +94,9 @@ type Reconciler struct { serviceLister corev1lister.ServiceLister // config is the configuration options that the Reconciler accepts. 
- config Config + config Config + podspecableTracker ListableTracker + onlyOnce sync.Once } var ( @@ -90,6 +106,39 @@ var ( _ eventlistenerreconciler.Finalizer = (*Reconciler)(nil) ) +type ListableTracker interface { + WatchOnDynamicObject(ctx context.Context, gvr schema.GroupVersionResource) error +} + +type listableTracker struct { + informerFactory duck.InformerFactory + impl *controller.Impl +} + +// NewListableTracker creates a new ListableTracker, backed by a TypedInformerFactory. +func NewListableTracker(ctx context.Context, getter func(ctx context.Context) duck.InformerFactory, impl *controller.Impl) ListableTracker { + return &listableTracker{ + informerFactory: getter(ctx), + impl: impl, + } +} + +func (t *listableTracker) WatchOnDynamicObject(ctx context.Context, gvr schema.GroupVersionResource) error { + return t.watchOnDynamicObject(ctx, gvr) +} + +func (t *listableTracker) watchOnDynamicObject(ctx context.Context, gvr schema.GroupVersionResource) error { + shInformer, _, err := t.informerFactory.Get(ctx, gvr) + if err != nil { + return err + } + shInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterControllerGVK(v1alpha1.SchemeGroupVersion.WithKind("EventListener")), + Handler: controller.HandleAll(t.impl.EnqueueControllerOf), + }) + return nil +} + // ReconcileKind compares the actual state with the desired, and attempts to // converge the two. func (r *Reconciler) ReconcileKind(ctx context.Context, el *v1alpha1.EventListener) pkgreconciler.Event { @@ -103,6 +152,10 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, el *v1alpha1.EventListen // and may not have had all of the assumed default specified. el.SetDefaults(v1alpha1.WithUpgradeViaDefaulting(ctx)) + if el.Spec.Resources.CustomResource != nil { + kError := r.reconcileCustomObject(ctx, logger, el) + return wrapError(kError, nil) + } deploymentReconcileError := r.reconcileDeployment(ctx, logger, el) serviceReconcileError := r.reconcileService(ctx, logger, el) @@ -359,6 +412,403 @@ func (r *Reconciler) reconcileDeployment(ctx context.Context, logger *zap.Sugare return nil } +func reconcileCustomObjectMeta(existing *unstructured.Unstructured, desired *unstructured.Unstructured) (updated bool) { + updatedMetaLabel, _, _ := unstructured.NestedFieldCopy(existing.Object, "metadata", "labels") + originalMetaLabel, _, _ := unstructured.NestedFieldCopy(desired.Object, "metadata", "labels") + if !reflect.DeepEqual(originalMetaLabel, updatedMetaLabel) { + updated = true + if err := unstructured.SetNestedField(existing.Object, originalMetaLabel, "metadata", "labels"); err != nil { + logger.Error("failed to set metadata labels to existing object: ", err) + updated = false + } + } + + updatedMetaOwner, _, _ := unstructured.NestedFieldCopy(existing.Object, "metadata", "ownerReferences") + originalMetaOwner, _, _ := unstructured.NestedFieldCopy(desired.Object, "metadata", "ownerReferences") + if !reflect.DeepEqual(originalMetaOwner, updatedMetaOwner) { + updated = true + if err := unstructured.SetNestedField(existing.Object, originalMetaOwner, "metadata", "ownerReferences"); err != nil { + logger.Error("failed to set metadata ownerReferences to existing object: ", err) + updated = false + } + } + + existingMetaAnno, _, _ := unstructured.NestedFieldCopy(existing.Object, "metadata", "annotations") + originalMetaAnno, _, _ := unstructured.NestedFieldCopy(desired.Object, "metadata", "annotations") + originalAnno, _ := originalMetaAnno.(map[string]interface{}) + existingAnno, _ := 
existingMetaAnno.(map[string]interface{}) + + if !reflect.DeepEqual(existingMetaAnno, mergeMaps1(existingAnno, originalAnno)) { + updated = true + if err := unstructured.SetNestedField(existing.Object, originalMetaAnno, "metadata", "annotations"); err != nil { + logger.Error("failed to set metadata annotations to existing object: ", err) + updated = false + } + } + return +} + +func reconcileCustomObjectSpec(existing *unstructured.Unstructured, desired *unstructured.Unstructured) (updated bool) { + updatedSpecMetaLabel, _, _ := unstructured.NestedFieldCopy(existing.Object, "spec", "template", "metadata", "labels") + originalSpecMetaLabel, _, _ := unstructured.NestedFieldCopy(desired.Object, "spec", "template", "metadata", "labels") + if !reflect.DeepEqual(updatedSpecMetaLabel, originalSpecMetaLabel) { + updated = true + if err := unstructured.SetNestedField(existing.Object, originalSpecMetaLabel, "spec", "template", "metadata", "labels"); err != nil { + logger.Error("failed to set metadata labels for spec to existing object: ", err) + updated = false + } + } + + updatedSpecMetaAnno, _, _ := unstructured.NestedFieldCopy(existing.Object, "spec", "template", "metadata", "annotations") + originalSpecMetaAnno, _, _ := unstructured.NestedFieldCopy(desired.Object, "spec", "template", "metadata", "annotations") + if !reflect.DeepEqual(updatedSpecMetaAnno, originalSpecMetaAnno) { + updated = true + if err := unstructured.SetNestedField(existing.Object, originalSpecMetaAnno, "spec", "template", "metadata", "annotations"); err != nil { + logger.Error("failed to set metadata annotations for spec to existing object: ", err) + updated = false + } + } + + var ( + existingEnv, existingPorts, existingVolumeMount []interface{} + desiredEnv, desiredPorts, desiredVolumeMount []interface{} + desiredName, existingName, desiredImage, existingImage string + desiredArgs, existingArgs []string + existingSecurityContext, desiredSecurityContext, existingResources, desiredResources interface{} + ) + + existingContainersData, _, _ := unstructured.NestedSlice(existing.Object, "spec", "template", "spec", "containers") + for i := range existingContainersData { + existingEnv, _, _ = unstructured.NestedSlice(existingContainersData[i].(map[string]interface{}), "env") + existingArgs, _, _ = unstructured.NestedStringSlice(existingContainersData[i].(map[string]interface{}), "args") + existingImage, _, _ = unstructured.NestedString(existingContainersData[i].(map[string]interface{}), "image") + existingName, _, _ = unstructured.NestedString(existingContainersData[i].(map[string]interface{}), "name") + existingPorts, _, _ = unstructured.NestedSlice(existingContainersData[i].(map[string]interface{}), "ports") + existingVolumeMount, _, _ = unstructured.NestedSlice(existingContainersData[i].(map[string]interface{}), "volumeMounts") + existingSecurityContext, _, _ = unstructured.NestedFieldCopy(existingContainersData[i].(map[string]interface{}), "securityContext") + existingResources, _, _ = unstructured.NestedFieldCopy(existingContainersData[i].(map[string]interface{}), "resources") + } + + desiredContainersData, _, _ := unstructured.NestedSlice(desired.Object, "spec", "template", "spec", "containers") + for i := range desiredContainersData { + desiredEnv, _, _ = unstructured.NestedSlice(desiredContainersData[i].(map[string]interface{}), "env") + desiredArgs, _, _ = unstructured.NestedStringSlice(desiredContainersData[i].(map[string]interface{}), "args") + desiredImage, _, _ = 
unstructured.NestedString(desiredContainersData[i].(map[string]interface{}), "image") + desiredName, _, _ = unstructured.NestedString(desiredContainersData[i].(map[string]interface{}), "name") + desiredPorts, _, _ = unstructured.NestedSlice(desiredContainersData[i].(map[string]interface{}), "ports") + desiredVolumeMount, _, _ = unstructured.NestedSlice(desiredContainersData[i].(map[string]interface{}), "volumeMounts") + desiredSecurityContext, _, _ = unstructured.NestedFieldCopy(desiredContainersData[i].(map[string]interface{}), "securityContext") + desiredResources, _, _ = unstructured.NestedFieldCopy(desiredContainersData[i].(map[string]interface{}), "resources") + } + + var cUpdated bool + if !reflect.DeepEqual(existingEnv, desiredEnv) { + cUpdated = true + for _, c := range existingEnv { + if err := unstructured.SetNestedSlice(c.(map[string]interface{}), desiredEnv, "env"); err != nil { + logger.Error("failed to set container env to existing object: ", err) + cUpdated = false + } + } + } + if !reflect.DeepEqual(existingArgs, desiredArgs) { + res := make(map[string]interface{}) + cUpdated = true + for _, c := range existingArgs { + res[c] = c + } + if err := unstructured.SetNestedStringSlice(res, desiredArgs, "args"); err != nil { + logger.Error("failed to set container args to existing object: ", err) + cUpdated = false + } + } + if !reflect.DeepEqual(existingImage, desiredImage) { + cUpdated = true + res := make(map[string]interface{}) + res[existingImage] = existingImage + if err := unstructured.SetNestedField(res, desiredImage, "image"); err != nil { + logger.Error("failed to set container image to existing object: ", err) + cUpdated = false + } + } + if !reflect.DeepEqual(existingName, desiredName) { + cUpdated = true + res := make(map[string]interface{}) + res[existingName] = existingName + if err := unstructured.SetNestedField(res, desiredName, "name"); err != nil { + logger.Error("failed to set container name to existing object: ", err) + cUpdated = false + } + } + if !reflect.DeepEqual(existingPorts, desiredPorts) { + cUpdated = true + for _, c := range existingPorts { + if err := unstructured.SetNestedSlice(c.(map[string]interface{}), desiredPorts, "ports"); err != nil { + logger.Error("failed to set container ports to existing object: ", err) + cUpdated = false + } + } + } + if !reflect.DeepEqual(existingVolumeMount, desiredVolumeMount) { + cUpdated = true + for _, c := range existingVolumeMount { + if err := unstructured.SetNestedSlice(c.(map[string]interface{}), desiredVolumeMount, "volumeMounts"); err != nil { + logger.Error("failed to set container volumeMount to existing object: ", err) + cUpdated = false + } + } + } + if !reflect.DeepEqual(existingSecurityContext, desiredSecurityContext) { + cUpdated = true + if err := unstructured.SetNestedField(existingSecurityContext.(map[string]interface{}), desiredSecurityContext, "securityContext"); err != nil { + logger.Error("failed to set container security context to existing object: ", err) + cUpdated = false + } + } + if !reflect.DeepEqual(existingResources, desiredResources) { + cUpdated = true + if err := unstructured.SetNestedField(existingResources.(map[string]interface{}), desiredResources, "resources"); err != nil { + logger.Error("failed to set container resources to existing object: ", err) + cUpdated = false + } + } + if cUpdated { + updated = true + err := unstructured.SetNestedField(existing.Object, desiredContainersData, "spec", "template", "spec", "containers") + if err != nil { + updated = false + } + } + 
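+	// After syncing the container fields, reconcile the remaining pod-level spec fields
+	// (serviceAccountName, volumes, tolerations, nodeSelector) from the desired object.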
existingSA, _, _ := unstructured.NestedFieldCopy(existing.Object, "spec", "template", "spec", "serviceAccountName") + desiredSA, _, _ := unstructured.NestedFieldCopy(desired.Object, "spec", "template", "spec", "serviceAccountName") + if !reflect.DeepEqual(existingSA, desiredSA) { + updated = true + if err := unstructured.SetNestedField(existing.Object, desiredSA, "spec", "template", "spec", "serviceAccountName"); err != nil { + logger.Error("failed to set service account to existing object: ", err) + updated = false + } + } + existingVol, _, _ := unstructured.NestedFieldCopy(existing.Object, "spec", "template", "spec", "volumes") + desiredVol, _, _ := unstructured.NestedFieldCopy(desired.Object, "spec", "template", "spec", "volumes") + if !reflect.DeepEqual(existingVol, desiredVol) { + updated = true + if err := unstructured.SetNestedField(existing.Object, desiredVol, "spec", "template", "spec", "volumes"); err != nil { + logger.Error("failed to set volumes to existing object: ", err) + updated = false + } + } + existingTolerations, _, _ := unstructured.NestedFieldCopy(existing.Object, "spec", "template", "spec", "tolerations") + desiredTolerations, _, _ := unstructured.NestedFieldCopy(desired.Object, "spec", "template", "spec", "tolerations") + if !reflect.DeepEqual(existingTolerations, desiredTolerations) { + updated = true + if err := unstructured.SetNestedField(existing.Object, desiredTolerations, "spec", "template", "spec", "tolerations"); err != nil { + logger.Error("failed to set tolerations to existing object: ", err) + updated = false + } + } + existingNodeSelector, _, _ := unstructured.NestedFieldCopy(existing.Object, "spec", "template", "spec", "nodeSelector") + desiredNodeSelector, _, _ := unstructured.NestedFieldCopy(desired.Object, "spec", "template", "spec", "nodeSelector") + if !reflect.DeepEqual(existingNodeSelector, desiredNodeSelector) { + updated = true + if err := unstructured.SetNestedField(existing.Object, desiredNodeSelector, "spec", "template", "spec", "nodeSelector"); err != nil { + logger.Error("failed to set nodeSelector to existing object: ", err) + updated = false + } + } + return +} + +func (r *Reconciler) reconcileCustomObject(ctx context.Context, logger *zap.SugaredLogger, el *v1alpha1.EventListener) error { + // check logging config, create if it doesn't exist + if err := r.reconcileLoggingConfig(ctx, logger, el); err != nil { + logger.Error(err) + return err + } + + original := duckv1.WithPod{} + decoder := json.NewDecoder(bytes.NewBuffer(el.Spec.Resources.CustomResource.Raw)) + if err := decoder.Decode(&original); err != nil { + logger.Errorf("unable to decode object", err) + return err + } + + customObjectData := original.DeepCopy() + + namespace := original.GetNamespace() + // Default the resource creation to the EventListenerNamespace if not found in the resource object + if namespace == "" { + namespace = el.GetNamespace() + } + + env := []corev1.EnvVar{{ + Name: "SYSTEM_NAMESPACE", + Value: el.Namespace, + }, { + Name: "TEKTON_INSTALL_NAMESPACE", + Value: system.GetNamespace(), + }} + + var resource corev1.ResourceRequirements + if len(original.Spec.Template.Spec.Containers) == 1 { + for i := range original.Spec.Template.Spec.Containers[0].Env { + env = append(env, original.Spec.Template.Spec.Containers[0].Env[i]) + } + resource = original.Spec.Template.Spec.Containers[0].Resources + } + + isMultiNS := false + if len(el.Spec.NamespaceSelector.MatchNames) != 0 { + isMultiNS = true + } + vMount := []corev1.VolumeMount{{ + Name: "config-logging", + 
MountPath: "/etc/config-logging", + ReadOnly: true, + }} + + container := corev1.Container{ + Name: "event-listener", + Image: *r.config.Image, + Ports: []corev1.ContainerPort{{ + ContainerPort: int32(eventListenerContainerPort), + Protocol: corev1.ProtocolTCP, + }}, + Resources: resource, + Args: []string{ + "--el-name=" + el.Name, + "--el-namespace=" + el.Namespace, + "--port=" + strconv.Itoa(eventListenerContainerPort), + "--readtimeout=" + strconv.FormatInt(*r.config.ReadTimeOut, 10), + "--writetimeout=" + strconv.FormatInt(*r.config.WriteTimeOut, 10), + "--idletimeout=" + strconv.FormatInt(*r.config.IdleTimeOut, 10), + "--timeouthandler=" + strconv.FormatInt(*r.config.TimeOutHandler, 10), + "--is-multi-ns=" + strconv.FormatBool(isMultiNS), + }, + VolumeMounts: vMount, + Env: env, + } + + podlabels := mergeMaps(el.Labels, GenerateResourceLabels(el.Name, r.config.StaticResourceLabels)) + + podlabels = mergeMaps(podlabels, customObjectData.Labels) + + original.Labels = podlabels + original.Annotations = customObjectData.Annotations + original.Spec.Template.ObjectMeta = metav1.ObjectMeta{ + Name: customObjectData.Spec.Template.Name, + Labels: customObjectData.Spec.Template.Labels, + Annotations: customObjectData.Spec.Template.Annotations, + } + original.Spec.Template.Spec = corev1.PodSpec{ + Tolerations: customObjectData.Spec.Template.Spec.Tolerations, + NodeSelector: customObjectData.Spec.Template.Spec.NodeSelector, + ServiceAccountName: customObjectData.Spec.Template.Spec.ServiceAccountName, + Containers: []corev1.Container{container}, + Volumes: []corev1.Volume{{ + Name: "config-logging", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: eventListenerConfigMapName, + }, + }, + }, + }}, + } + marshaledData, err := json.Marshal(original) + if err != nil { + logger.Errorf("failed to marshal custom object", err) + return err + } + data := new(unstructured.Unstructured) + if err := data.UnmarshalJSON(marshaledData); err != nil { + logger.Errorf("failed to unmarshal to unstructured object", err) + return err + } + + if data.GetName() == "" { + data.SetName(el.Status.Configuration.GeneratedResourceName) + } + gvr, _ := meta.UnsafeGuessKindToResource(data.GetObjectKind().GroupVersionKind()) + + data.SetOwnerReferences([]metav1.OwnerReference{*el.GetOwnerReference()}) + + var watchError error + r.onlyOnce.Do(func() { + watchError = r.podspecableTracker.WatchOnDynamicObject(ctx, gvr) + }) + if watchError != nil { + logger.Errorf("failed to watch on created custom object", watchError) + return err + } + + existingCustomObject, err := r.DynamicClientSet.Resource(gvr).Namespace(namespace).Get(ctx, data.GetName(), metav1.GetOptions{}) + switch { + case err == nil: + statusData, ok, err := unstructured.NestedMap(existingCustomObject.Object, "status") + if !ok { + // No status in the created object, it is weird but let's not fail + return nil + } + if err != nil { + return err + } + conditionData, ok, err := unstructured.NestedFieldCopy(statusData, "conditions") + if !ok { + // No conditions in the created object, it is weird but let's not fail + return nil + } + if err != nil { + return err + } + generationData, ok, err := unstructured.NestedInt64(statusData, "observedGeneration") + if !ok { + // No observedGeneration in the created object, it is weird but let's not fail + return nil + } + if err != nil { + return err + } + // set generation value to el status + el.Status.ObservedGeneration = generationData + 
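+		// Round-trip the unstructured conditions through JSON to get typed knative Conditions,
+		// then mirror them (and the service URL) onto the EventListener status.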
cMarshalledData, err := json.Marshal(conditionData)
+		if err != nil {
+			return err
+		}
+		var customConditions v1beta1.Conditions
+		if err = json.Unmarshal(cMarshalledData, &customConditions); err != nil {
+			return err
+		}
+		el.Status.SetConditionsForDynamicObjects(customConditions)
+
+		url := statusData["url"]
+		if url != nil {
+			urlData := strings.Split(fmt.Sprintf("%v", url), "//")
+			el.Status.SetAddress(fmt.Sprintf("%v:8080", urlData[1]))
+		}
+
+		if reconcileCustomObjectMeta(existingCustomObject, data) || reconcileCustomObjectSpec(existingCustomObject, data) {
+			if _, err := r.DynamicClientSet.Resource(gvr).Namespace(namespace).Update(ctx, existingCustomObject, metav1.UpdateOptions{}); err != nil {
+				logger.Errorf("failed to update EventListener custom object: %v", err)
+				return err
+			}
+			logger.Infof("Updated EventListener Custom Object %s in Namespace %s", data.GetName(), el.Namespace)
+		}
+	case errors.IsNotFound(err):
+		createDynamicObject, err := r.DynamicClientSet.Resource(gvr).Namespace(namespace).Create(ctx, data, metav1.CreateOptions{})
+		el.Status.SetExistsCondition(v1alpha1.KnativeServiceExists, err)
+		if err != nil {
+			logger.Errorf("failed to create EventListener custom object: %v", err)
+			return err
+		}
+		logger.Infof("Created EventListener Custom Object %s in Namespace %s", createDynamicObject.GetName(), el.Namespace)
+	default:
+		logger.Error(err)
+		return err
+	}
+	return nil
+}
+
 func getDeployment(el *v1alpha1.EventListener, c Config) *appsv1.Deployment {
 	var replicas = ptr.Int32(1)
 	if el.Spec.Replicas != nil {
@@ -638,6 +1088,19 @@ func mergeMaps(m1, m2 map[string]string) map[string]string {
 	return merged
 }
 
+// mergeMaps1 merges the values in the passed maps into a new map.
+// Values within m2 potentially clobber m1 values.
+func mergeMaps1(m1, m2 map[string]interface{}) map[string]interface{} {
+	merged := make(map[string]interface{}, len(m1)+len(m2))
+	for k, v := range m1 {
+		merged[k] = v
+	}
+	for k, v := range m2 {
+		merged[k] = v
+	}
+	return merged
+}
+
 // wrapError wraps errors together. If one of the errors is nil, the other is
 // returned.
func wrapError(err1, err2 error) error { diff --git a/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go b/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go index 9ebbd1311d..438ce4c6f6 100644 --- a/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go +++ b/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go @@ -18,6 +18,7 @@ package eventlistener import ( "context" + "encoding/json" "fmt" "os" "strconv" @@ -26,6 +27,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + logger "github.com/sirupsen/logrus" "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" "github.com/tektoncd/triggers/pkg/system" "github.com/tektoncd/triggers/test" @@ -34,6 +36,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" @@ -48,8 +51,9 @@ import ( ) var ( - eventListenerName = "my-eventlistener" - generatedResourceName = fmt.Sprintf("el-%s", eventListenerName) + eventListenerName = "my-eventlistener" + generatedResourceName = fmt.Sprintf("el-%s", eventListenerName) + generatedCustomResourceName = fmt.Sprintf("el-custom-%s", eventListenerName) namespace = "test-pipelines" namespaceResource = &corev1.Namespace{ @@ -380,6 +384,104 @@ var withTLSConfig = func(d *appsv1.Deployment) { }} } +// makePodSpec is a helper to build a Knative Service that is created by an EventListener. +// It generates a basic Knative Service for the simplest EventListener and accepts functions for modification +func makeCustomData(ops ...func(d *unstructured.Unstructured)) *unstructured.Unstructured { + ownerRefs := makeEL().GetOwnerReference() + + d := duckv1.WithPod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "serving.knative.dev/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: generatedCustomResourceName, + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + *ownerRefs, + }, + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "EventListener", + "app.kubernetes.io/part-of": "Triggers", + "eventlistener": eventListenerName, + "serving.knative.dev/visibility": "cluster-local", + }, + }, + Spec: duckv1.WithPodSpec{ + Template: duckv1.PodSpecable{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rev1", + }, + Spec: corev1.PodSpec{ + ServiceAccountName: "tekton-triggers-example-sa", + Containers: []corev1.Container{{ + Name: "event-listener", + Image: DefaultImage, + Ports: []corev1.ContainerPort{{ + ContainerPort: int32(eventListenerContainerPort), + Protocol: corev1.ProtocolTCP, + }}, + Args: []string{ + "--el-name=" + eventListenerName, + "--el-namespace=" + namespace, + "--port=" + strconv.Itoa(eventListenerContainerPort), + "--readtimeout=" + strconv.FormatInt(DefaultReadTimeout, 10), + "--writetimeout=" + strconv.FormatInt(DefaultWriteTimeout, 10), + "--idletimeout=" + strconv.FormatInt(DefaultIdleTimeout, 10), + "--timeouthandler=" + strconv.FormatInt(DefaultTimeOutHandler, 10), + "--is-multi-ns=" + strconv.FormatBool(false), + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: "config-logging", + MountPath: "/etc/config-logging", + ReadOnly: true, + }}, + Env: []corev1.EnvVar{{ + Name: "SYSTEM_NAMESPACE", + Value: "test-pipelines", + }, { + Name: "TEKTON_INSTALL_NAMESPACE", + Value: "tekton-pipelines", + }, { + Name: "key", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: 
&corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "test"}, + Key: "a.crt", + }, + }, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "config-logging", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: eventListenerConfigMapName, + }, + }, + }, + }}, + }, + }, + }, + } + + marshaledData, err := json.Marshal(d) + if err != nil { + logger.Errorf("failed to marshal custom object %v", err) + } + data := new(unstructured.Unstructured) + if err := data.UnmarshalJSON(marshaledData); err != nil { + logger.Errorf("failed to unmarshal to unstructured object %v", err) + } + + for _, op := range ops { + op(data) + } + return data +} + // makeService is a helper to build a Service that is created by an EventListener. // It generates a basic Service for the simplest EventListener and accepts functions for modification. func makeService(ops ...func(*corev1.Service)) *corev1.Service { @@ -423,6 +525,24 @@ var withTLSPort = bldr.EventListenerStatus( bldr.EventListenerAddress(listenerHostname(generatedResourceName, namespace, 8443)), ) +var withKnativeStatus = bldr.EventListenerStatus( + bldr.EventListenerCondition( + v1alpha1.ServiceExists, + corev1.ConditionFalse, + "", "", + ), + bldr.EventListenerCondition( + v1alpha1.DeploymentExists, + corev1.ConditionFalse, + "", "", + ), + bldr.EventListenerCondition( + v1alpha1.KnativeServiceExists, + corev1.ConditionTrue, + "Knative Service exists", "", + ), +) + var withStatus = bldr.EventListenerStatus( bldr.EventListenerConfig(generatedResourceName), bldr.EventListenerAddress(listenerHostname(generatedResourceName, namespace, DefaultPort)), @@ -436,6 +556,11 @@ var withStatus = bldr.EventListenerStatus( corev1.ConditionTrue, "Deployment exists", "", ), + bldr.EventListenerCondition( + v1alpha1.KnativeServiceExists, + corev1.ConditionFalse, + "", "", + ), bldr.EventListenerCondition( apis.ConditionType(appsv1.DeploymentAvailable), corev1.ConditionTrue, @@ -562,6 +687,40 @@ func TestReconcile(t *testing.T) { } }) + elWithCustomResource := makeEL(withStatus, withKnativeStatus, func(el *v1alpha1.EventListener) { + el.Spec.Resources.CustomResource = &v1alpha1.CustomResource{ + RawExtension: test.RawExtension(t, duckv1.WithPod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "serving.knative.dev/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: generatedCustomResourceName, + Labels: map[string]string{"serving.knative.dev/visibility": "cluster-local"}, + }, + Spec: duckv1.WithPodSpec{Template: duckv1.PodSpecable{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rev1", + }, + Spec: corev1.PodSpec{ + ServiceAccountName: "tekton-triggers-example-sa", + Containers: []corev1.Container{{ + Env: []corev1.EnvVar{{ + Name: "key", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "test"}, + Key: "a.crt", + }, + }, + }}, + }}, + }, + }}, + }), + } + }) + elWithTLSConnection := makeEL(withStatus, withTLSPort, func(el *v1alpha1.EventListener) { el.Spec.Resources.KubernetesResource = &v1alpha1.KubernetesResource{ WithPodSpec: duckv1.WithPodSpec{ @@ -719,351 +878,351 @@ func TestReconcile(t *testing.T) { config *Config // Config of the reconciler startResources test.Resources // State of the world before we call Reconcile endResources test.Resources // Expected State of the world after calling Reconcile - }{{ - name: "eventlistener creation", - key: reconcileKey, - 
startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL()}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL(withStatus)}, - Deployments: []*appsv1.Deployment{makeDeployment()}, - Services: []*corev1.Service{makeService()}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with additional label", - key: reconcileKey, - // Resources before reconcile starts: EL has extra label that deployment/svc does not - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedLabels)}, - Deployments: []*appsv1.Deployment{makeDeployment()}, - Services: []*corev1.Service{makeService()}, - }, - // We expect the deployment and services to propagate the extra label - // but the selectors in both Service and deployment should have the same - // label - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedLabels)}, - Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, - Services: []*corev1.Service{elServiceWithLabels}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with additional annotation", - key: reconcileKey, - // Resources before reconcile starts: EL has annotation that deployment/svc does not - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedAnnotations)}, - Deployments: []*appsv1.Deployment{makeDeployment()}, - Services: []*corev1.Service{makeService()}, - }, - // We expect the deployment and services to propagate the annotations - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedAnnotations)}, - Deployments: []*appsv1.Deployment{elDeploymentWithAnnotations}, - Services: []*corev1.Service{elServiceWithAnnotation}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with updated service account", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithUpdatedSA}, - Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, - Services: []*corev1.Service{elService}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithUpdatedSA}, - Deployments: []*appsv1.Deployment{elDeploymentWithUpdatedSA}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with added tolerations", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithTolerations}, - Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, - Services: []*corev1.Service{elService}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithTolerations}, - Deployments: []*appsv1.Deployment{elDeploymentWithTolerations}, - Services: []*corev1.Service{elService}, - ConfigMaps: 
[]*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with added NodeSelector", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithNodeSelector}, - Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, - Services: []*corev1.Service{elService}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithNodeSelector}, - Deployments: []*appsv1.Deployment{elDeploymentWithNodeSelector}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with NodePort service", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elServiceWithLabels}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elServiceTypeNodePort}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - // Check that if a user manually updates the labels for a service, we revert the change. - name: "eventlistener with labels added to service", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Services: []*corev1.Service{elServiceWithLabels}, - Deployments: []*appsv1.Deployment{elDeployment}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Services: []*corev1.Service{elService}, // We expect the service to drop the user added labels - Deployments: []*appsv1.Deployment{elDeployment}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - // Check that if a user manually updates the annotations for a service, we do not revert the change. 
- name: "eventlistener with annotations added to service", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Services: []*corev1.Service{elServiceWithAnnotation}, - Deployments: []*appsv1.Deployment{elDeployment}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Services: []*corev1.Service{elServiceWithAnnotation}, - Deployments: []*appsv1.Deployment{elDeployment}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - // Checks that EL reconciler does not overwrite NodePort set by k8s (see #167) - name: "eventlistener with updated NodePort service", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elServiceWithUpdatedNodePort}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elServiceWithUpdatedNodePort}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with labels applied to deployment", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, - Services: []*corev1.Service{elService}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - // Check that if a user manually updates the annotations for a deployment, we do not revert the change. 
- name: "eventlistener with annotations applied to deployment", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{elDeploymentWithAnnotations}, - Services: []*corev1.Service{elService}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{elDeploymentWithAnnotations}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - // Updating replicas on deployment itself is success because no replicas provided as part of eventlistener spec - name: "eventlistener with updated replicas on deployment", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicas}, - Services: []*corev1.Service{elService}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicas}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with failed update to deployment replicas", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithDeploymentReplicaFailure}, - Services: []*corev1.Service{elService}, - Deployments: []*appsv1.Deployment{elDeployment}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with updated config volumes", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL(withStatus)}, - Deployments: []*appsv1.Deployment{deploymentMissingVolumes}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{makeEL(withStatus)}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - // Checks that we do not overwrite replicas changed on the deployment itself when replicas provided as part of eventlistener spec - name: "eventlistener with updated replicas", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithReplicas}, - Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicas}, - Services: []*corev1.Service{elService}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithReplicas}, - Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicasNotConsidered}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with kubernetes resource", - key: 
reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithKubernetesResource}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithKubernetesResource}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - Deployments: []*appsv1.Deployment{deploymentForKubernetesResource}, - Services: []*corev1.Service{elServiceTypeNodePort}, - }, - }, { - name: "eventlistener with kubernetes resource for podtemplate objectmeta", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithKubernetesResourceForObjectMeta}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithKubernetesResourceForObjectMeta}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - Deployments: []*appsv1.Deployment{deploymentForKubernetesResourceObjectMeta}, - Services: []*corev1.Service{elService}, - }, - }, { - name: "eventlistener with TLS connection", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithTLSConnection}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithTLSConnection}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - Deployments: []*appsv1.Deployment{deploymentWithTLSConnection}, - Services: []*corev1.Service{elServiceWithTLSConnection}, - }, - }, { - name: "eventlistener with security context", - key: reconcileKey, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{deploymentMissingSecurityContext}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{elDeployment}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, { - name: "eventlistener with SetSecurityContext false", - key: reconcileKey, - config: configWithSetSecurityContextFalse, - startResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{deploymentMissingSecurityContext}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - endResources: test.Resources{ - Namespaces: []*corev1.Namespace{namespaceResource}, - EventListeners: []*v1alpha1.EventListener{elWithStatus}, - Deployments: []*appsv1.Deployment{deploymentMissingSecurityContext}, - Services: []*corev1.Service{elService}, - ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, - }, - }, + }{ { + name: "eventlistener creation", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{makeEL()}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + 
EventListeners: []*v1alpha1.EventListener{makeEL(withStatus)}, + Deployments: []*appsv1.Deployment{makeDeployment()}, + Services: []*corev1.Service{makeService()}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with additional label", + key: reconcileKey, + // Resources before reconcile starts: EL has extra label that deployment/svc does not + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedLabels)}, + Deployments: []*appsv1.Deployment{makeDeployment()}, + Services: []*corev1.Service{makeService()}, + }, + // We expect the deployment and services to propagate the extra label + // but the selectors in both Service and deployment should have the same + // label + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedLabels)}, + Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, + Services: []*corev1.Service{elServiceWithLabels}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with additional annotation", + key: reconcileKey, + // Resources before reconcile starts: EL has annotation that deployment/svc does not + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedAnnotations)}, + Deployments: []*appsv1.Deployment{makeDeployment()}, + Services: []*corev1.Service{makeService()}, + }, + // We expect the deployment and services to propagate the annotations + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{makeEL(withStatus, withAddedAnnotations)}, + Deployments: []*appsv1.Deployment{elDeploymentWithAnnotations}, + Services: []*corev1.Service{elServiceWithAnnotation}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with updated service account", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithUpdatedSA}, + Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, + Services: []*corev1.Service{elService}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithUpdatedSA}, + Deployments: []*appsv1.Deployment{elDeploymentWithUpdatedSA}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with added tolerations", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithTolerations}, + Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, + Services: []*corev1.Service{elService}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithTolerations}, + Deployments: []*appsv1.Deployment{elDeploymentWithTolerations}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with added NodeSelector", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: 
[]*v1alpha1.EventListener{elWithNodeSelector}, + Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, + Services: []*corev1.Service{elService}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithNodeSelector}, + Deployments: []*appsv1.Deployment{elDeploymentWithNodeSelector}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with NodePort service", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elServiceWithLabels}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elServiceTypeNodePort}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + // Check that if a user manually updates the labels for a service, we revert the change. + name: "eventlistener with labels added to service", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Services: []*corev1.Service{elServiceWithLabels}, + Deployments: []*appsv1.Deployment{elDeployment}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Services: []*corev1.Service{elService}, // We expect the service to drop the user added labels + Deployments: []*appsv1.Deployment{elDeployment}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + // Check that if a user manually updates the annotations for a service, we do not revert the change. 
+ name: "eventlistener with annotations added to service", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Services: []*corev1.Service{elServiceWithAnnotation}, + Deployments: []*appsv1.Deployment{elDeployment}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Services: []*corev1.Service{elServiceWithAnnotation}, + Deployments: []*appsv1.Deployment{elDeployment}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + // Checks that EL reconciler does not overwrite NodePort set by k8s (see #167) + name: "eventlistener with updated NodePort service", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elServiceWithUpdatedNodePort}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithNodePortServiceType}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elServiceWithUpdatedNodePort}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with labels applied to deployment", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{elDeploymentWithLabels}, + Services: []*corev1.Service{elService}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + // Check that if a user manually updates the annotations for a deployment, we do not revert the change. 
+ name: "eventlistener with annotations applied to deployment", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{elDeploymentWithAnnotations}, + Services: []*corev1.Service{elService}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{elDeploymentWithAnnotations}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + // Updating replicas on deployment itself is success because no replicas provided as part of eventlistener spec + name: "eventlistener with updated replicas on deployment", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicas}, + Services: []*corev1.Service{elService}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicas}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with failed update to deployment replicas", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithDeploymentReplicaFailure}, + Services: []*corev1.Service{elService}, + Deployments: []*appsv1.Deployment{elDeployment}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with updated config volumes", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{makeEL(withStatus)}, + Deployments: []*appsv1.Deployment{deploymentMissingVolumes}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{makeEL(withStatus)}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + // Checks that we do not overwrite replicas changed on the deployment itself when replicas provided as part of eventlistener spec + name: "eventlistener with updated replicas", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithReplicas}, + Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicas}, + Services: []*corev1.Service{elService}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithReplicas}, + Deployments: []*appsv1.Deployment{deploymentWithUpdatedReplicasNotConsidered}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with kubernetes resource", + key: 
reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithKubernetesResource}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithKubernetesResource}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + Deployments: []*appsv1.Deployment{deploymentForKubernetesResource}, + Services: []*corev1.Service{elServiceTypeNodePort}, + }, + }, { + name: "eventlistener with kubernetes resource for podtemplate objectmeta", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithKubernetesResourceForObjectMeta}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithKubernetesResourceForObjectMeta}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + Deployments: []*appsv1.Deployment{deploymentForKubernetesResourceObjectMeta}, + Services: []*corev1.Service{elService}, + }, + }, { + name: "eventlistener with TLS connection", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithTLSConnection}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithTLSConnection}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + Deployments: []*appsv1.Deployment{deploymentWithTLSConnection}, + Services: []*corev1.Service{elServiceWithTLSConnection}, + }, + }, { + name: "eventlistener with security context", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{deploymentMissingSecurityContext}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{elDeployment}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { + name: "eventlistener with SetSecurityContext false", + key: reconcileKey, + config: configWithSetSecurityContextFalse, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{deploymentMissingSecurityContext}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithStatus}, + Deployments: []*appsv1.Deployment{deploymentMissingSecurityContext}, + Services: []*corev1.Service{elService}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + }, { name: "eventlistener with port set in config", key: reconcileKey, config: configWithPortSet, @@ -1081,6 +1240,21 @@ func TestReconcile(t *testing.T) { ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, }, }, + { + name: "eventlistener with custom resource", + key: reconcileKey, + startResources:
test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithCustomResource}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{elWithCustomResource}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + CustomData: []*unstructured.Unstructured{makeCustomData()}, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/resources/create.go b/pkg/resources/create.go index e2b26216ec..72c0c39c30 100644 --- a/pkg/resources/create.go +++ b/pkg/resources/create.go @@ -35,7 +35,7 @@ import ( discoveryclient "k8s.io/client-go/discovery" ) -// findAPIResource returns the APIResource definition using the discovery client c. +// FindAPIResource returns the APIResource definition using the discovery client c. func findAPIResource(apiVersion, kind string, c discoveryclient.ServerResourcesInterface) (*metav1.APIResource, error) { resourceList, err := c.ServerResourcesForGroupVersion(apiVersion) if err != nil { diff --git a/test/controller.go b/test/controller.go index af93ccd7e2..b042b376a4 100644 --- a/test/controller.go +++ b/test/controller.go @@ -18,9 +18,11 @@ package test import ( "context" + "encoding/json" "testing" // Link in the fakes so they get injected into injection.Fake + logger "github.com/sirupsen/logrus" fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" fakeresourceclientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake" @@ -37,10 +39,15 @@ import ( faketriggertemplateinformer "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/fake" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" fakedynamic "k8s.io/client-go/dynamic/fake" fakekubeclientset "k8s.io/client-go/kubernetes/fake" + "knative.dev/pkg/apis/duck" + duckinformerfake "knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/fake" fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" fakedeployinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/fake" fakeconfigmapinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake" @@ -49,6 +56,7 @@ import ( fakeserviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" fakeserviceaccountinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount/fake" "knative.dev/pkg/controller" + fakedynamicclientset "knative.dev/pkg/injection/clients/dynamicclient/fake" ) // Resources represents the desired state of the system (i.e. existing resources) @@ -66,15 +74,18 @@ type Resources struct { Secrets []*corev1.Secret ServiceAccounts []*corev1.ServiceAccount Pods []*corev1.Pod + CustomData []*unstructured.Unstructured } // Clients holds references to clients which are useful for reconciler tests. 
type Clients struct { - Kube *fakekubeclientset.Clientset - Triggers *faketriggersclientset.Clientset - Pipeline *fakepipelineclientset.Clientset - Resource *fakeresourceclientset.Clientset - Dynamic *dynamicclientset.Clientset + Kube *fakekubeclientset.Clientset + Triggers *faketriggersclientset.Clientset + Pipeline *fakepipelineclientset.Clientset + Resource *fakeresourceclientset.Clientset + Dynamic *dynamicclientset.Clientset + DynamicClient *fakedynamic.FakeDynamicClient + DuckInformerFactory duck.InformerFactory } // Assets holds references to the controller and clients. @@ -89,11 +100,12 @@ func SeedResources(t *testing.T, ctx context.Context, r Resources) Clients { t.Helper() dynamicClient := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) c := Clients{ - Kube: fakekubeclient.Get(ctx), - Triggers: faketriggersclient.Get(ctx), - Pipeline: fakepipelineclient.Get(ctx), - Resource: fakeresourceclient.Get(ctx), - Dynamic: dynamicclientset.New(tekton.WithClient(dynamicClient)), + Kube: fakekubeclient.Get(ctx), + Triggers: faketriggersclient.Get(ctx), + Pipeline: fakepipelineclient.Get(ctx), + Resource: fakeresourceclient.Get(ctx), + Dynamic: dynamicclientset.New(tekton.WithClient(dynamicClient)), + DynamicClient: fakedynamicclientset.Get(ctx), } // Teach Kube clients about the Tekton resources (needed by discovery client when creating resources) @@ -111,6 +123,7 @@ func SeedResources(t *testing.T, ctx context.Context, r Resources) Clients { secretInformer := fakesecretinformer.Get(ctx) saInformer := fakeserviceaccountinformer.Get(ctx) podInformer := fakepodinformer.Get(ctx) + duckInformerFactory := duckinformerfake.Get(ctx) // Create Namespaces for _, ns := range r.Namespaces { @@ -210,9 +223,35 @@ func SeedResources(t *testing.T, ctx context.Context, r Resources) Clients { } } + for _, d := range r.CustomData { + marshaledData, err := json.Marshal(d) + if err != nil { + logger.Errorf("failed to marshal custom object %v ", err) + t.Fatal(err) + } + data := new(unstructured.Unstructured) + if err := data.UnmarshalJSON(marshaledData); err != nil { + logger.Errorf("failed to unmarshal to unstructured object %v ", err) + t.Fatal(err) + } + gvr, _ := meta.UnsafeGuessKindToResource(data.GetObjectKind().GroupVersionKind()) + shInformer, _, err := duckInformerFactory.Get(ctx, gvr) + if err != nil { + t.Fatal(err) + } + if err := shInformer.GetIndexer().Add(d); err != nil { + t.Fatal(err) + } + dynamicInterface := c.DynamicClient.Resource(gvr) + if _, err := dynamicInterface.Create(context.Background(), data, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + } + c.Kube.ClearActions() c.Triggers.ClearActions() c.Pipeline.ClearActions() + c.DynamicClient.ClearActions() return c } @@ -308,6 +347,20 @@ func GetResourcesFromClients(c Clients) (*Resources, error) { for _, pod := range podList.Items { testResources.Pods = append(testResources.Pods, pod.DeepCopy()) } + // Hardcode GVR for custom resource test + gvr := schema.GroupVersionResource{ + Group: "serving.knative.dev", + Version: "v1", + Resource: "services", + } + dynamicInterface := c.DynamicClient.Resource(gvr) + customData, err := dynamicInterface.List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + for _, cData := range customData.Items { + testResources.CustomData = append(testResources.CustomData, cData.DeepCopy()) + } } return testResources, nil diff --git a/test/controller_test.go b/test/controller_test.go index 727f3e44ee..e6fa0c743b 100644 --- a/test/controller_test.go +++ 
b/test/controller_test.go @@ -17,13 +17,17 @@ limitations under the License. package test import ( + "encoding/json" "testing" "github.com/google/go-cmp/cmp" + logger "github.com/sirupsen/logrus" "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + duckv1 "knative.dev/pkg/apis/duck/v1" rtesting "knative.dev/pkg/reconciler/testing" ) @@ -145,6 +149,25 @@ func TestGetResourcesFromClients(t *testing.T) { }, } + cData := duckv1.WithPod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "serving.knative.dev/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "knativeservice", + }, + } + + marshaledData, err := json.Marshal(cData) + if err != nil { + logger.Errorf("failed to marshal custom object %v", err) + } + data := new(unstructured.Unstructured) + if err := data.UnmarshalJSON(marshaledData); err != nil { + logger.Errorf("failed to unmarshal to unstructured object %v", err) + } + tests := []struct { name string Resources Resources @@ -165,6 +188,7 @@ func TestGetResourcesFromClients(t *testing.T) { Deployments: []*appsv1.Deployment{deployment1}, Services: []*corev1.Service{service1}, Pods: []*corev1.Pod{pod1}, + CustomData: []*unstructured.Unstructured{data}, }, }, { diff --git a/test/wait.go b/test/wait.go index 10887e2033..773021000e 100644 --- a/test/wait.go +++ b/test/wait.go @@ -34,10 +34,10 @@ package test import ( "context" - "time" - "testing" + "time" + "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -75,8 +75,11 @@ func eventListenerReady(t *testing.T, c *clients, namespace, name string) wait.C return false, nil } for _, cond := range el.Status.Conditions { - if cond.Status != corev1.ConditionTrue { - return false, nil + // Once we have this fix https://github.com/tektoncd/triggers/issues/932 we can avoid these multiple conditions + if cond.Type != v1alpha1.KnativeServiceExists { + if cond.Status != corev1.ConditionTrue { + return false, nil + } } } return true, nil diff --git a/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/fake/fake.go b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/fake/fake.go new file mode 100644 index 0000000000..b40daf97dc --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/fake/fake.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + podspecable "knative.dev/pkg/client/injection/ducks/duck/v1/podspecable" + injection "knative.dev/pkg/injection" +) + +var Get = podspecable.Get + +func init() { + injection.Fake.RegisterDuck(podspecable.WithDuck) +} diff --git a/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/podspecable.go b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/podspecable.go new file mode 100644 index 0000000000..9ae0971da6 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/podspecable.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package podspecable + +import ( + context "context" + + duck "knative.dev/pkg/apis/duck" + v1 "knative.dev/pkg/apis/duck/v1" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + dynamicclient "knative.dev/pkg/injection/clients/dynamicclient" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterDuck(WithDuck) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func WithDuck(ctx context.Context) context.Context { + dc := dynamicclient.Get(ctx) + dif := &duck.CachedInformerFactory{ + Delegate: &duck.TypedInformerFactory{ + Client: dc, + Type: (&v1.PodSpecable{}).GetFullType(), + ResyncPeriod: controller.GetResyncPeriod(ctx), + StopChannel: ctx.Done(), + }, + } + return context.WithValue(ctx, Key{}, dif) +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) duck.InformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/pkg/apis/duck.InformerFactory from context.") + } + return untyped.(duck.InformerFactory) +} diff --git a/vendor/knative.dev/pkg/injection/clients/dynamicclient/dynamicclient.go b/vendor/knative.dev/pkg/injection/clients/dynamicclient/dynamicclient.go new file mode 100644 index 0000000000..2eece5c555 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/clients/dynamicclient/dynamicclient.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamicclient + +import ( + "context" + + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + "knative.dev/pkg/injection" + "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, dynamic.NewForConfigOrDie(cfg)) +} + +// Get extracts the Dynamic client from the context. +func Get(ctx context.Context) dynamic.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/client-go/dynamic.Interface from context.") + } + return untyped.(dynamic.Interface) +} diff --git a/vendor/knative.dev/pkg/injection/clients/dynamicclient/fake/fake.go b/vendor/knative.dev/pkg/injection/clients/dynamicclient/fake/fake.go new file mode 100644 index 0000000000..12670edd2b --- /dev/null +++ b/vendor/knative.dev/pkg/injection/clients/dynamicclient/fake/fake.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/rest" + + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/clients/dynamicclient" + "knative.dev/pkg/logging" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx, runtime.NewScheme()) + return ctx +} + +func With(ctx context.Context, scheme *runtime.Scheme, objects ...runtime.Object) (context.Context, *fake.FakeDynamicClient) { + cs := fake.NewSimpleDynamicClient(scheme, objects...) + return context.WithValue(ctx, dynamicclient.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. 
+func Get(ctx context.Context) *fake.FakeDynamicClient { + untyped := ctx.Value(dynamicclient.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (*fake.FakeDynamicClient)(nil)) + } + return untyped.(*fake.FakeDynamicClient) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8720d15602..3d9d28f980 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -266,6 +266,7 @@ github.com/rogpeppe/go-internal/modfile github.com/rogpeppe/go-internal/module github.com/rogpeppe/go-internal/semver # github.com/sirupsen/logrus v1.7.0 +## explicit github.com/sirupsen/logrus # github.com/spf13/cobra v1.0.0 ## explicit @@ -970,6 +971,8 @@ knative.dev/pkg/apis/duck/v1 knative.dev/pkg/apis/duck/v1alpha1 knative.dev/pkg/apis/duck/v1beta1 knative.dev/pkg/changeset +knative.dev/pkg/client/injection/ducks/duck/v1/podspecable +knative.dev/pkg/client/injection/ducks/duck/v1/podspecable/fake knative.dev/pkg/client/injection/kube/client knative.dev/pkg/client/injection/kube/client/fake knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration @@ -996,6 +999,8 @@ knative.dev/pkg/configmap/informer knative.dev/pkg/controller knative.dev/pkg/hash knative.dev/pkg/injection +knative.dev/pkg/injection/clients/dynamicclient +knative.dev/pkg/injection/clients/dynamicclient/fake knative.dev/pkg/injection/clients/namespacedkube/informers/core/v1/secret knative.dev/pkg/injection/clients/namespacedkube/informers/factory knative.dev/pkg/injection/sharedmain
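
For reference, the pattern the new tests rely on — round-tripping a typed `duckv1.WithPod` through JSON into an `unstructured.Unstructured`, guessing its GroupVersionResource, and seeding it into a fake dynamic client — can be sketched standalone. This is a minimal illustration of that pattern, not part of the patch; the `main` wrapper and the hard-coded object name and namespace are assumptions made only for the example.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	fakedynamic "k8s.io/client-go/dynamic/fake"
	duckv1 "knative.dev/pkg/apis/duck/v1"
)

func main() {
	// Build a typed Knative Service the same way makeCustomData does, then
	// round-trip it through JSON to obtain the unstructured form the dynamic
	// client works with. Name and namespace here are illustrative only.
	typed := duckv1.WithPod{
		TypeMeta:   metav1.TypeMeta{Kind: "Service", APIVersion: "serving.knative.dev/v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "knativeservice", Namespace: "test-pipelines"},
	}
	raw, err := json.Marshal(typed)
	if err != nil {
		panic(err)
	}
	u := new(unstructured.Unstructured)
	if err := u.UnmarshalJSON(raw); err != nil {
		panic(err)
	}

	// Derive the GroupVersionResource (services.serving.knative.dev) from the
	// object's kind, as SeedResources does before talking to the fake client.
	gvr, _ := meta.UnsafeGuessKindToResource(u.GetObjectKind().GroupVersionKind())

	// Seed a fake dynamic client and read the object back, mirroring how the
	// reconciler tests compare expected and actual custom resources.
	dc := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())
	if _, err := dc.Resource(gvr).Namespace("test-pipelines").Create(context.Background(), u, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	got, err := dc.Resource(gvr).Namespace("test-pipelines").Get(context.Background(), "knativeservice", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("seeded %s as %s\n", got.GetName(), gvr.String())
}
```

In the patch itself, `SeedResources` additionally adds the object to the duck-typed informer's indexer (`shInformer.GetIndexer().Add(d)`) so the reconciler can list the custom resource without hitting the API server.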