diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk
index 8c08ceade..2bb44be4f 100644
--- a/.bingo/Variables.mk
+++ b/.bingo/Variables.mk
@@ -23,6 +23,12 @@ $(BINGO): $(BINGO_DIR)/bingo.mod
 	@echo "(re)installing $(GOBIN)/bingo-v0.9.0"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.9.0 "github.com/bwplotka/bingo"
 
+CONFTEST := $(GOBIN)/conftest-v0.62.0
+$(CONFTEST): $(BINGO_DIR)/conftest.mod
+	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+	@echo "(re)installing $(GOBIN)/conftest-v0.62.0"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=conftest.mod -o=$(GOBIN)/conftest-v0.62.0 "github.com/open-policy-agent/conftest"
+
 CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.19.0
 $(CONTROLLER_GEN): $(BINGO_DIR)/controller-gen.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
@@ -47,17 +53,17 @@ $(GOJQ): $(BINGO_DIR)/gojq.mod
 	@echo "(re)installing $(GOBIN)/gojq-v0.12.17"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=gojq.mod -o=$(GOBIN)/gojq-v0.12.17 "github.com/itchyny/gojq/cmd/gojq"
 
-GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.6.2
+GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.7.2
 $(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/golangci-lint-v2.6.2"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v2.6.2 "github.com/golangci/golangci-lint/v2/cmd/golangci-lint"
+	@echo "(re)installing $(GOBIN)/golangci-lint-v2.7.2"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v2.7.2 "github.com/golangci/golangci-lint/v2/cmd/golangci-lint"
 
-GORELEASER := $(GOBIN)/goreleaser-v1.26.2
+GORELEASER := $(GOBIN)/goreleaser-v2.11.2
 $(GORELEASER): $(BINGO_DIR)/goreleaser.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/goreleaser-v1.26.2"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goreleaser.mod -o=$(GOBIN)/goreleaser-v1.26.2 "github.com/goreleaser/goreleaser"
+	@echo "(re)installing $(GOBIN)/goreleaser-v2.11.2"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goreleaser.mod -o=$(GOBIN)/goreleaser-v2.11.2 "github.com/goreleaser/goreleaser/v2"
 
 HELM := $(GOBIN)/helm-v3.18.4
 $(HELM): $(BINGO_DIR)/helm.mod
@@ -71,6 +77,12 @@ $(KIND): $(BINGO_DIR)/kind.mod
 	@echo "(re)installing $(GOBIN)/kind-v0.30.0"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kind.mod -o=$(GOBIN)/kind-v0.30.0 "sigs.k8s.io/kind"
 
+KUBE_SCORE := $(GOBIN)/kube-score-v1.20.0
+$(KUBE_SCORE): $(BINGO_DIR)/kube-score.mod
+	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+	@echo "(re)installing $(GOBIN)/kube-score-v1.20.0"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kube-score.mod -o=$(GOBIN)/kube-score-v1.20.0 "github.com/zegl/kube-score/cmd/kube-score"
+
 KUSTOMIZE := $(GOBIN)/kustomize-v5.7.1
 $(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
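For context on how these generated targets are consumed: each variable points at a version-suffixed binary under $(GOBIN), and listing it as a prerequisite makes the pinned tool build lazily on first use. A minimal sketch (the target name, policy directory, and kustomize path are hypothetical, not part of this diff):

    verify-manifests: $(CONFTEST) $(KUBE_SCORE) $(KUSTOMIZE)
    	$(KUSTOMIZE) build config/default | $(CONFTEST) test --policy policy/ -
    	$(KUSTOMIZE) build config/default | $(KUBE_SCORE) score -

Because the binary names embed the version (e.g. conftest-v0.62.0), bumps like golangci-lint v2.6.2 to v2.7.2, or goreleaser v1.26.2 to v2.11.2 (note the module path changing to github.com/goreleaser/goreleaser/v2), trigger a rebuild automatically rather than reusing a stale binary.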
diff --git a/.bingo/conftest.mod b/.bingo/conftest.mod
new file mode 100644
index 000000000..294b93132
--- /dev/null
+++ b/.bingo/conftest.mod
@@ -0,0 +1,5 @@
+module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
+
+go 1.24.6
+
+require github.com/open-policy-agent/conftest v0.62.0
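These per-tool .mod/.sum pairs are written by the bingo CLI, not by hand, as the DO NOT EDIT header says. A hedged sketch of the kind of commands that would produce or refresh the files in this diff, assuming bingo's documented `bingo get <package>@<version>` form:

    bingo get github.com/open-policy-agent/conftest@v0.62.0
    bingo get github.com/zegl/kube-score/cmd/kube-score@v1.20.0
    bingo get github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.7.2

Each invocation pins the tool in its own module file and regenerates Variables.mk, which is why the dependency closure recorded in conftest.sum below stays isolated from the project's main go.mod.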
diff --git a/.bingo/conftest.sum b/.bingo/conftest.sum
new file mode 100644
index 000000000..b34a3b44b
--- /dev/null
+++ b/.bingo/conftest.sum
@@ -0,0 +1,2041 @@
[2,041 added lines of auto-generated go.sum checksums (module path, version, h1: dirhash) omitted]
h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.49.6 h1:yNldzF5kzLBRvKlKz1S0bkvc2+04R1kt13KfBWQBfFA= +github.com/aws/aws-sdk-go v1.49.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw= +github.com/basgys/goxml2json v1.1.0/go.mod h1:wH7a5Np/Q4QoECFIU8zTQlZwZkrilY0itPfecMw41Dw= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= +github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= 
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= +github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= +github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-akka/configuration v0.0.0-20200606091224-a002c0330665 h1:Iz3aEheYgn+//VX7VisgCmF/wW3BMtXCLbvHV4jMQJA= +github.com/go-akka/configuration v0.0.0-20200606091224-a002c0330665/go.mod h1:19bUnum2ZAeftfwwLZ/wRe7idyfoW2MfmXO464Hrfbw= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godoctor/godoctor v0.0.0-20181123222458-69df17f3a6f6/go.mod h1:+tyhT8jBF8E0XvdlSXOSL7Iko7DlNiongHq3q+wcsPs= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-jsonnet v0.20.0 h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g= +github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA= +github.com/google/go-jsonnet v0.21.0 h1:43Bk3K4zMRP/aAZm9Po2uSEjY6ALCkYUVIcz9HLGMvA= +github.com/google/go-jsonnet v0.21.0/go.mod h1:tCGAu8cpUpEZcdGMmdOu37nh8bGgqubhI5v2iSk3KJQ= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go 
v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.7.6 h1:5jHuM+aH373XNtXl9TNTUH5Qd69Trve11tHIrB+6yj4= +github.com/hashicorp/go-getter v1.7.6/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY= +github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= +github.com/hashicorp/hcl/v2 v2.17.0 h1:z1XvSUyXd1HP10U4lrLg5e0JMVz6CPaJvAgxM0KNZVY= +github.com/hashicorp/hcl/v2 v2.17.0/go.mod h1:gJyW2PTShkJqQBKpAmPO3yxMxIuoXkOF2TpqXzrQyx4= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jstemmer/go-junit-report v1.0.0 h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds= +github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/k0kubun/pp v3.0.1+incompatible/go.mod 
h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= +github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 
h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/buildkit v0.16.0 h1:wOVBj1o5YNVad/txPQNXUXdelm7Hs/i0PUFjzbK0VKE= +github.com/moby/buildkit v0.16.0/go.mod h1:Xqx/5GlrqE1yIRORk0NSCVDFpQAU1WjlT6KHYZdisIQ= +github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ= +github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/muhammadmuzzammil1998/jsonc v1.0.0 h1:8o5gBQn4ZA3NBA9DlTujCj2a4w0tqWrPVjDwhzkgTIs= +github.com/muhammadmuzzammil1998/jsonc v1.0.0/go.mod h1:saF2fIVw4banK0H4+/EuqfFLpRnoy5S+ECwTOCcRcSU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/open-policy-agent/conftest v0.56.0 h1:Q27Y45rdUHAOTjkeTbmHf2kWgW+DeFauZMaDjJm98YA= +github.com/open-policy-agent/conftest v0.56.0/go.mod h1:u4xu/0jtZnsenKf06J/tdm/7CtP8ODmZ/JsRPTDCXMg= +github.com/open-policy-agent/conftest v0.62.0 h1:mk6Kbf8WTGjI8byKd59GWjIGsOPr+dmiEwjyDEZMWhk= +github.com/open-policy-agent/conftest v0.62.0/go.mod h1:oX2ScMAaFCJ2f4bAy23GBibaUzn1b8lRs6gkhu4G+IA= +github.com/open-policy-agent/opa v0.69.0 h1:s2igLw2Z6IvGWGuXSfugWkVultDMsM9pXiDuMp7ckWw= +github.com/open-policy-agent/opa v0.69.0/go.mod h1:+qyXJGkpEJ6kpB1kGo8JSwHtVXbTdsGdQYPWWNYNj+4= +github.com/open-policy-agent/opa v1.6.0 h1:/S/cnNQJ2MUMNzizHPbisTWBHowmLkPrugY5jjkPlRQ= +github.com/open-policy-agent/opa v1.6.0/go.mod h1:zFmw4P+W62+CWGYRDDswfVYSCnPo6oYaktQnfIaRFC4= 
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/owenrumney/go-sarif v1.1.1/go.mod h1:dNDiPlF04ESR/6fHlPyq7gHKmrM0sHUvAGjsoh8ZH0U= +github.com/owenrumney/go-sarif/v2 v2.3.3 h1:ubWDJcF5i3L/EIOER+ZyQ03IfplbSU1BLOE26uKQIIU= +github.com/owenrumney/go-sarif/v2 v2.3.3/go.mod h1:MSqMMx9WqlBSY7pXoOZWgEsVB4FDNfhcaXDA1j6Sr+w= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= 
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= +github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shteou/go-ignore v0.3.1 h1:/DVY4w06eKliWrbkwKfBHJgUleld+QAlmlQvfRQOigA= +github.com/shteou/go-ignore v0.3.1/go.mod h1:hMVyBe+qt5/Z11W/Fxxf86b5SuL8kM29xNWLYob9Vos= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= +github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk= 
+github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify 
v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tmccombs/hcl2json v0.3.1 h1:Pf+Lb9OpZ5lkQuIC0BB5txdCQskZ2ud/l8sz/Nkjf3A= +github.com/tmccombs/hcl2json v0.3.1/go.mod h1:ljY0/prd2IFUF3cagQjV3cpPEEQKzqyGqnKI7m5DBVY= +github.com/tmccombs/hcl2json v0.6.7 h1:RYKTs4kd/gzRsEiv7J3M2WQ7TYRYZVc+0H0pZdERkxA= +github.com/tmccombs/hcl2json v0.6.7/go.mod h1:lJgBOOGDpbhjvdG2dLaWsqB4KBzul2HytfDTS3H465o= +github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8= +github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE= +github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 h1:2f304B10LaZdB8kkVEaoXvAMVan2tl9AiK4G0odjQtE= +github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/vektah/gqlparser v1.2.0/go.mod h1:bkVf0FX+Stjg/MHnm8mEyubuaArhNEqfQhF+OTiAL74= +github.com/vektah/gqlparser/v2 v2.5.28 h1:bIulcl3LF69ba6EiZVGD88y4MkM+Jxrf3P2MX8xLRkY= +github.com/vektah/gqlparser/v2 v2.5.28/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod 
h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.6.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o= +github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0= +github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= +github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= +github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod 
h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= 
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum 
v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= 
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0= +google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto 
v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto 
v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= 
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +muzzammil.xyz/jsonc v1.0.0 h1:B6kaT3wHueZ87mPz3q1nFuM1BlL32IG0wcq0/uOsQ18= +muzzammil.xyz/jsonc v1.0.0/go.mod h1:rFv8tUUKe+QLh7v02BhfxXEf4ZHhYD7unR93HL/1Uvo= +olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3 h1:slmdOY3vp8a7KQbHkL+FLbvbkgMqmXojpFUO/jENuqQ= +olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3/go.mod h1:oVgVk4OWVDi43qWBEyGhXgYxt7+ED4iYNpTngSLX2Iw= +oras.land/oras-go/v2 v2.4.0 h1:i+Wt5oCaMHu99guBD0yuBjdLvX7Lz8ukPbwXdR7uBMs= +oras.land/oras-go/v2 v2.4.0/go.mod 
h1:osvtg0/ClRq1KkydMAEu/IxFieyjItcsQ4ut4PPF+f8= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/.bingo/golangci-lint.mod b/.bingo/golangci-lint.mod index 4607edf92..5b5575d98 100644 --- a/.bingo/golangci-lint.mod +++ b/.bingo/golangci-lint.mod @@ -2,4 +2,4 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT go 1.24.6 -require github.com/golangci/golangci-lint/v2 v2.6.2 // cmd/golangci-lint +require github.com/golangci/golangci-lint/v2 v2.7.2 // cmd/golangci-lint diff --git a/.bingo/golangci-lint.sum b/.bingo/golangci-lint.sum index 3146c7150..b7d8399e3 100644 --- a/.bingo/golangci-lint.sum +++ b/.bingo/golangci-lint.sum @@ -65,6 +65,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/MirrexOne/unqueryvet v1.2.1 h1:M+zdXMq84g+E1YOLa7g7ExN3dWfZQrdDSTCM7gC+m/A= github.com/MirrexOne/unqueryvet v1.2.1/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= +github.com/MirrexOne/unqueryvet v1.3.0 h1:5slWSomgqpYU4zFuZ3NNOfOUxVPlXFDBPAVasZOGlAY= +github.com/MirrexOne/unqueryvet v1.3.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= @@ -114,6 +116,8 @@ github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= github.com/catenacyber/perfsprint v0.10.0 h1:AZj1mYyxbxLRqmnYOeguZXEQwWOgQGm2wzLI5d7Hl/0= github.com/catenacyber/perfsprint v0.10.0/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc= +github.com/catenacyber/perfsprint v0.10.1 h1:u7Riei30bk46XsG8nknMhKLXG9BcXz3+3tl/WpKm0PQ= +github.com/catenacyber/perfsprint v0.10.1/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc= github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc= github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -209,6 +213,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godoc-lint/godoc-lint v0.10.1 h1:ZPUVzlDtJfA+P688JfPJPkI/SuzcBr/753yGIk5bOPA= github.com/godoc-lint/godoc-lint v0.10.1/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw= +github.com/godoc-lint/godoc-lint v0.10.2 h1:dksNgK+zebnVlj4Fx83CRnCmPO0qRat/9xfFsir1nfg= +github.com/godoc-lint/godoc-lint v0.10.2/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw= github.com/gofrs/flock v0.13.0 
h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -251,6 +257,8 @@ github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0a github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= github.com/golangci/golangci-lint/v2 v2.6.2 h1:jkMSVv36JmyTENcEertckvimvjPcD5qxNM7W7qhECvI= github.com/golangci/golangci-lint/v2 v2.6.2/go.mod h1:fSIMDiBt9kzdpnvvV7GO6iWzyv5uaeZ+iPor+2uRczE= +github.com/golangci/golangci-lint/v2 v2.7.2 h1:AhBC+YeEueec4AGlIbvPym5C70Thx0JykIqXbdIXWx0= +github.com/golangci/golangci-lint/v2 v2.7.2/go.mod h1:pDijleoBu7e8sejMqyZ3L5n6geqe+cVvOAz2QImqqVc= github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 h1:AkK+w9FZBXlU/xUmBtSJN1+tAI4FIvy5WtnUnY8e4p8= github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ= github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= @@ -308,6 +316,8 @@ github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1T github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -394,6 +404,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU= github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8= +github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM= +github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -488,6 +500,8 @@ github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iM github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8= github.com/securego/gosec/v2 v2.22.10 h1:ntbBqdWXnu46DUOXn+R2SvPo3PiJCDugTCgTW2g4tQg= github.com/securego/gosec/v2 v2.22.10/go.mod h1:9UNjK3tLpv/w2b0+7r82byV43wCJDNtEDQMeS+H/g2w= +github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7 h1:rZg6IGn0ySYZwCX8LHwZoYm03JhG/cVAJJ3O+u3Vclo= +github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7/go.mod h1:9sr22NZO5Kfh7unW/xZxkGYTmj2484/fCiE54gw7UTY= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= 
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -503,10 +517,14 @@ github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCp github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -519,6 +537,8 @@ github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YE github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= +github.com/stbenjam/no-sprintf-host-port v0.3.1 h1:AyX7+dxI4IdLBPtDbsGAyqiTSLpCP9hWRrXQDU4Cm/g= +github.com/stbenjam/no-sprintf-host-port v0.3.1/go.mod h1:ODbZesTCHMVKthBHskvUUexdcNHAQRXk9NpSsL8p/HQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -541,6 +561,8 @@ github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwT github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= github.com/tomarrell/wrapcheck/v2 v2.11.0 h1:BJSt36snX9+4WTIXeJ7nvHBQBcm1h2SjQMSlmQ6aFSU= github.com/tomarrell/wrapcheck/v2 v2.11.0/go.mod h1:wFL9pDWDAbXhhPZZt+nG8Fu+h29TtnZ2MW6Lx4BRXIU= +github.com/tomarrell/wrapcheck/v2 v2.12.0 h1:H/qQ1aNWz/eeIhxKAFvkfIA+N7YDvq6TWVFL27Of9is= +github.com/tomarrell/wrapcheck/v2 v2.12.0/go.mod h1:AQhQuZd0p7b6rfW+vUwHm5OMCGgp63moQ9Qr/0BpIWo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= @@ -589,6 +611,8 @@ go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 
h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -642,6 +666,8 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -756,6 +782,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -774,6 +802,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -829,6 +859,8 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58 golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/.bingo/goreleaser.mod b/.bingo/goreleaser.mod index d4e6c3832..3fe6a4405 100644 --- a/.bingo/goreleaser.mod +++ b/.bingo/goreleaser.mod @@ -1,5 +1,5 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT -go 1.22.5 +go 1.24.6 -require github.com/goreleaser/goreleaser v1.26.2 +require github.com/goreleaser/goreleaser/v2 v2.11.2 diff --git a/.bingo/goreleaser.sum b/.bingo/goreleaser.sum index c5a6760d4..7d1df8e6f 100644 --- a/.bingo/goreleaser.sum +++ b/.bingo/goreleaser.sum @@ -1,199 +1,239 @@ +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= -cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs= -cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs= -cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY= -cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o= -code.gitea.io/sdk/gitea v0.18.0 h1:+zZrwVmujIrgobt6wVBWCqITz6bn1aBjnCUHmpZrerI= -code.gitea.io/sdk/gitea v0.18.0/go.mod h1:IG9xZJoltDNeDSW0qiF2Vqx5orMWa7OhVWrjvrd5NpI= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw= +cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw= +cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= +cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= 
+cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0= +cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY= +code.gitea.io/sdk/gitea v0.21.0 h1:69n6oz6kEVHRo1+APQQyizkhrZrLsTLXey9142pfkD4= +code.gitea.io/sdk/gitea v0.21.0/go.mod h1:tnBjVhuKJCn8ibdyyhvUyxrR1Ca2KHEoTWoukNhXQPA= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/42wim/httpsig v1.2.2 h1:ofAYoHUNs/MJOLqQ8hIxeyz2QxOz8qdSVvp3PX/oPgA= +github.com/42wim/httpsig v1.2.2/go.mod h1:P/UYo7ytNBFwc+dg35IubuAUIs8zj5zzFIgUCEl55WY= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 h1:DSDNVxqkoXJiko6x8a90zidoYqnYYa6c1MTzDKzKkTo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1/go.mod h1:zGqV2R4Cr/k8Uye5w+dgQ06WJtEcbQG/8J7BB6hnCr4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest 
v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE= +github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w= +github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/autorest/to v0.4.1 h1:CxNHBqdzTr7rLtdrtb5CMjJcDut+WNGCVv7OmS5+lTc= +github.com/Azure/go-autorest/autorest/to v0.4.1/go.mod h1:EtaofgU4zmtvn1zT2ARsjRFdq9vXx0YWtmElwL+GZ9M= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 
h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o= +github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0= +github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= 
-github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= -github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= -github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= -github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22 h1:5NFK6VGgqBUOAX2SYyzFYvNdOiYDxzim8jga386FlZY= -github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22/go.mod h1:Kv+Mm9CdtnV8iem48iEPIwy7/N4Wmk0hpxYNH5gTwKQ= -github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a h1:nJ2G8zWKASyVClGVgG7sfM5mwoZlZ2zYpIzN2OhjWkw= -github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a/go.mod h1:ubLFmlsv8/DFUQrZwY5syT5/8Er3ugSr4rDFwHsE3hg= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/anchore/bubbly v0.0.0-20241107060245-f2a5536f366a h1:smr1CcMkgeMd6G75N+2OVNk/uHbX/WLR0bk+kMWEyr8= +github.com/anchore/bubbly v0.0.0-20241107060245-f2a5536f366a/go.mod h1:P5IrP8AhuzApVKa5H7k2hHX5pZA1uhyi+Z1VjK1EtA4= +github.com/anchore/go-logger v0.0.0-20241005132348-65b4486fbb28 h1:TKlTOayTJKpoLPJbeMykEwxCn0enACf06u0RSIdFG5w= +github.com/anchore/go-logger v0.0.0-20241005132348-65b4486fbb28/go.mod h1:5iJIa34inbIEFRwoWxNBTnjzIcl4G3le1LppPDmpg/4= github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb h1:iDMnx6LIjtjZ46C0akqveX83WFzhpTD3eqOthawb5vU= github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb/go.mod h1:DmTY2Mfcv38hsHbG78xMiTDdxFtkHpgYNVDPsF2TgHk= -github.com/anchore/quill v0.4.1 h1:mffDnvnER3ZgPjN5hexc3nr/4Y1dtKdDB6td5K8uInk= -github.com/anchore/quill v0.4.1/go.mod h1:t6hOPYDohN8wn2SRWQdNkJBkhmK8s3gzuHzzgcEvzQU= +github.com/anchore/quill v0.5.1 h1:+TAJroWuMC0AofI4gD9V9v65zR8EfKZg8u+ZD+dKZS4= +github.com/anchore/quill v0.5.1/go.mod h1:tAzfFxVluL2P1cT+xEy+RgQX1hpNuliUC5dTYSsnCLQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/atc0005/go-teams-notify/v2 v2.10.0 h1:eQvRIkyESQgBvlUdQ/iPol/lj3QcRyrdEQM3+c/nXhM= -github.com/atc0005/go-teams-notify/v2 v2.10.0/go.mod h1:SIeE1UfCcVRYMqP5b+r1ZteHyA/2UAjzWF5COnZ8q0w= -github.com/aws/aws-sdk-go v1.53.0 h1:MMo1x1ggPPxDfHMXJnQudTbGXYlD4UigUAud1DJxPVo= -github.com/aws/aws-sdk-go v1.53.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= -github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 h1:gTK2uhtAPtFcdRRJilZPx8uJLL2J85xK11nKtWL0wfU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1/go.mod h1:sxpLb+nZk7tIfCWChfd+h4QwHNUR57d8hA1cleTkjJo= 
-github.com/aws/aws-sdk-go-v2/config v1.27.13 h1:WbKW8hOzrWoOA/+35S5okqO/2Ap8hkkFUzoW8Hzq24A= -github.com/aws/aws-sdk-go-v2/config v1.27.13/go.mod h1:XLiyiTMnguytjRER7u5RIkhIqS8Nyz41SwAWb4xEjxs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.13 h1:XDCJDzk/u5cN7Aple7D/MiAhx1Rjo/0nueJ0La8mRuE= -github.com/aws/aws-sdk-go-v2/credentials v1.17.13/go.mod h1:FMNcjQrmuBYvOTZDtOLCIu0esmxjF7RuA/89iSXWzQI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 h1:vXY/Hq1XdxHBIYgBUmug/AbMyIe1AKulPYS2/VE1X70= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9/go.mod h1:GyJJTZoHVuENM4TeJEl5Ffs4W9m19u+4wKJcDi/GZ4A= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 h1:mDnFOE2sVkyphMWtTH+stv0eW3k0OTx94K63xpxHty4= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3/go.mod h1:V8MuRVcCRt5h1S+Fwu8KbC7l/gBGo3yBAyUbJM2IJOk= -github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0 h1:rdPrcOZmqT2F+yzmKEImrx5XUs7Hpf4V9Rp6E8mhsxQ= -github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5 h1:452e/nFuqPvwPg+1OD2CG/v29R9MH8egJSJKh2Qduv8= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5/go.mod h1:8pvvNAklmq+hKmqyvFoMRg0bwg9sdGOvdwximmKiKP0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 h1:mbWNpfRUTT6bnacmvOTKXZjR/HycibdWzNpfbrbLDIs= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5/go.mod h1:FCOPWGjsshkkICJIn9hq9xr6dLKtyaWpuUojiN3W1/8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 h1:4t+QEX7BsXz98W8W1lNvMAG+NX8qHz2CjLBxQKku40g= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A= -github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU= -github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g= -github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 h1:lW5xUzOPGAMY7HPuNF4FdyBwRc3UJ/e8KsapbesVeNU= -github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 h1:o5cTaeunSpfXiLTIBx5xo2enQmiChtu1IBbzXnfU9Hs= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.6/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 h1:Qe0r0lVURDDeBQJ4yP+BOrJkvkiCo/3FH/t+wY11dmw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 h1:et3Ta53gotFR4ERLXXHIHl/Uuk1qYpP5uU7cvNql8ns= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.7/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f h1:Z0kS9pJDQgCg3u2lH6+CdYaFbyQtyukVTiUCG6re0E4= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f/go.mod h1:rAE739ssmE5O5fLuQ2y8uHdmOJaelE5I0Es3SxV0y1A= +github.com/atc0005/go-teams-notify/v2 v2.13.0 h1:nbDeHy89NjYlF/PEfLVF6lsserY9O5SnN1iOIw3AxXw= +github.com/atc0005/go-teams-notify/v2 v2.13.0/go.mod h1:WSv9moolRsBcpZbwEf6gZxj7h0uJlJskJq5zkEWKO8Y= +github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= +github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.37.1 h1:SMUxeNz3Z6nqGsXv0JuJXc8w5YMtrQMuIBmDx//bBDY= +github.com/aws/aws-sdk-go-v2 v1.37.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg= +github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= +github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 h1:6VFPH/Zi9xYFMJKPQOX5URYkQoXRWeJ7V/7Y6ZDYoms= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69/go.mod h1:GJj8mmO6YT6EqgduWocwhMoxTLFitkhIrK+owzrYL2I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 h1:ksZXBYv80EFTcgc8OJO48aQ8XDWXIQL7gGasPeCoTzI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1/go.mod h1:HSksQyyJETVZS7uM54cir0IgxttTD+8aEoJMPGepHBI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 h1:+dn/xF/05utS7tUhjIcndbuaPjfll2LhbH1cCDGLYUQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1/go.mod h1:hyAGz30LHdm5KBZDI58MXx5lDVZ5CUfvfTZvMu4HCZo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 h1:4HbnOGE9491a9zYJ9VpPh1ApgEq6ZlD4Kuv1PJenFpc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1/go.mod h1:Z6QnHC6TmpJWUxAy8FI4JzA7rTwl6EIANkyK9OR5z5w= +github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 h1:Bwzh202Aq7/MYnAjXA9VawCf6u+hjwMdoYmZ4HYsdf8= 
+github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1/go.mod h1:xZzWl9AXYa6zsLLH41HBFW8KRKJRIzlGmvSM0mVMIX4= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 h1:XJ/AEFYj9VFPJdF+VFi4SUPEDfz1akHwxxm07JfZJcs= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2/go.mod h1:JUBHdhvKbbKmhaHjLsKJAWnQL80T6nURmhB/LEprV+4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 h1:ps3nrmBWdWwakZBydGX1CxeYFK80HsQ79JLMwm7Y4/c= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1/go.mod h1:bAdfrfxENre68Hh2swNaGEVuFYE74o0SaSCAlaG9E74= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 h1:ky79ysLMxhwk5rxJtS+ILd3Mc8kC5fhsLBrP27r6h4I= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1/go.mod h1:+2MmkvFvPYM1vsozBWduoLJUi5maxFk5B7KJFECujhY= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 h1:MdVYlN5pcQu1t1OYx4Ajo3fKl1IEhzgdPQbYFCRjYS8= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1/go.mod h1:iikmNLrvHm2p4a3/4BPeix2S9P+nW8yM1IZW73x8bFA= +github.com/aws/aws-sdk-go-v2/service/kms v1.38.1 h1:tecq7+mAav5byF+Mr+iONJnCBf4B4gon8RSp4BrweSc= +github.com/aws/aws-sdk-go-v2/service/kms v1.38.1/go.mod h1:cQn6tAF77Di6m4huxovNM7NVAozWTZLsDRp9t8Z/WYk= +github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1 h1:Hsqo8+dFxSdDvv9B2PgIx1AJAnDpqgS0znVI+R+MoGY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1/go.mod h1:8Q0TAPXD68Z8YqlcIGHs/UNIDHsxErV9H4dl4vJEpgw= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= +github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= +github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1 h1:6lMw4/QGLFPvbKQ0eri/9Oh3YX5Nm6BPrUlZR8yuJHg= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1/go.mod h1:EVJOSYOVeoD3VFFZ/dWCAzWJp5wZr9lTOCjW8ejAmO0= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/blacktop/go-dwarf v1.0.9 h1:eT/L7gt0gllvvgnRXY0MFKjNB6+jtOY5DTm2ynVX2dY= -github.com/blacktop/go-dwarf v1.0.9/go.mod h1:4W2FKgSFYcZLDwnR7k+apv5i3nrau4NGl9N6VQ9DSTo= -github.com/blacktop/go-macho v1.1.162 h1:FjM3XAsJTAOGZ1eppRSX9ZBX3Bk11JMTC1amsZAOA5I= -github.com/blacktop/go-macho v1.1.162/go.mod h1:f2X4noFBob4G5bWUrzvPBKDVcFWZgDCM7rIn7ygTID0= +github.com/blacktop/go-dwarf v1.0.10 
h1:i9zYgcIROETsNZ6V+zZn3uDH21FCG5BLLZ837GitxS0= +github.com/blacktop/go-dwarf v1.0.10/go.mod h1:4W2FKgSFYcZLDwnR7k+apv5i3nrau4NGl9N6VQ9DSTo= +github.com/blacktop/go-macho v1.1.238 h1:OFfT6NB/SWxkoky7L/ytuY8QekgFpa9pmz/GHUQLsmM= +github.com/blacktop/go-macho v1.1.238/go.mod h1:dtlW2AJKQpFzImBVPWiUKZ6OxrQ2MLfWi/BPPe0EONE= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= -github.com/bluesky-social/indigo v0.0.0-20240411170459-440932307e0d h1:xxPhzCOpmOntzVe8S6tqsMdFgaB8B4NXSV54lG4B1qk= -github.com/bluesky-social/indigo v0.0.0-20240411170459-440932307e0d/go.mod h1:ysMQ0a4RYWjgyvKrl5ME352oHA6QgK900g5sB9XXgPE= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bluesky-social/indigo v0.0.0-20240813042137-4006c0eca043 h1:927VIkxPFKpfJKVDtCNgSQtlhksARaLvsLxppR2FukM= +github.com/bluesky-social/indigo v0.0.0-20240813042137-4006c0eca043/go.mod h1:dXjdzg6bhg1JKnKuf6EBJTtcxtfHYBFEe9btxX5YeAE= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/caarlos0/ctrlc v1.2.0 h1:AtbThhmbeYx1WW3WXdWrd94EHKi+0NPRGS4/4pzrjwk= -github.com/caarlos0/ctrlc v1.2.0/go.mod h1:n3gDlSjsXZ7rbD9/RprIR040b7oaLfNStikPd4gFago= -github.com/caarlos0/env/v11 v11.0.1 h1:A8dDt9Ub9ybqRSUF3fQc/TA/gTam2bKT4Pit+cwrsPs= -github.com/caarlos0/env/v11 v11.0.1/go.mod h1:2RC3HQu8BQqtEK3V4iHPxj0jOdWdbPpWJ6pOueeU1xM= +github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA= +github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U= github.com/caarlos0/go-reddit/v3 v3.0.1 h1:w8ugvsrHhaE/m4ez0BO/sTBOBWI9WZTjG7VTecHnql4= github.com/caarlos0/go-reddit/v3 v3.0.1/go.mod h1:QlwgmG5SAqxMeQvg/A2dD1x9cIZCO56BMnMdjXLoisI= github.com/caarlos0/go-shellwords v1.0.12 h1:HWrUnu6lGbWfrDcFiHcZiwOLzHWjjrPVehULaTFgPp8= github.com/caarlos0/go-shellwords v1.0.12/go.mod h1:bYeeX1GrTLPl5cAMYEzdm272qdsQAZiaHgeF0KTk1Gw= -github.com/caarlos0/go-version v0.1.1 h1:1bikKHkGGVIIxqCmufhSSs3hpBScgHGacrvsi8FuIfc= -github.com/caarlos0/go-version v0.1.1/go.mod h1:Ze5Qx4TsBBi5FyrSKVg1Ibc44KGV/llAaKGp86oTwZ0= -github.com/caarlos0/log v0.4.4 h1:LnvgBz/ofsJ00AupP/cEfksJSZglb1L69g4Obk/sdAc= -github.com/caarlos0/log v0.4.4/go.mod h1:+AmCI9Liv5LKXmzFmFI1htuHdTTj/0R3KuoP9DMY7Mo= +github.com/caarlos0/go-version v0.2.1 h1:bJY5WRvs2RXErLKBELd1WR0U72whX8ELbKg0WtQ9/7A= +github.com/caarlos0/go-version v0.2.1/go.mod h1:X+rI5VAtJDpcjCjeEIXpxGa5+rTcgur1FK66wS0/944= +github.com/caarlos0/log v0.5.1 h1:uB1jhC/+HimtyyL7pxidkUWO4raKmidVuXifC4uqMf8= +github.com/caarlos0/log v0.5.1/go.mod h1:37k7VCogxsMsgpIQaca5g9eXFFrLJ5LGgA4Ng/xN85o= github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk= github.com/carlmjohnson/versioninfo v0.22.5 h1:O00sjOLUAFxYQjlN/bzYTuZiS0y6fWDQjMRvwtKgwwc= github.com/carlmjohnson/versioninfo v0.22.5/go.mod h1:QT9mph3wcVfISUKd0i9sZfVrPviHuSF+cUtLjm2WSf8= github.com/cavaliergopher/cpio v1.0.1 h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM= github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc= 
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/charmbracelet/bubbletea v0.22.1 h1:z66q0LWdJNOWEH9zadiAIXp2GN1AWrwNXU8obVY9X24= -github.com/charmbracelet/bubbletea v0.22.1/go.mod h1:8/7hVvbPN6ZZPkczLiB8YpLkLJ0n7DMho5Wvfd2X1C0= -github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s= -github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE= -github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d h1:+o+e/8hf7cG0SbAzEAm/usJ8qoZPgFXhudLjop+TM0g= -github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d/go.mod h1:aoG4bThKYIOnyB55r202eHqo6TkN7ZXV+cu4Do3eoBQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/bubbletea v1.3.0 h1:fPMyirm0u3Fou+flch7hlJN9krlnVURrkUVDwqXjoAc= +github.com/charmbracelet/bubbletea v1.3.0/go.mod h1:eTaHfqbIwvBhFQM/nlT1NsGc4kp8jhF8LfUK67XiTDM= +github.com/charmbracelet/colorprofile v0.3.1 h1:k8dTHMd7fgw4bnFd7jXTLZrSU/CQrKnL3m+AxCzDz40= +github.com/charmbracelet/colorprofile v0.3.1/go.mod h1:/GkGusxNs8VB/RSOh3fu0TJmQ4ICMMPApIIVn0KszZ0= +github.com/charmbracelet/fang v0.3.0 h1:Be6TB+ExS8VWizTQRJgjqbJBudKrmVUet65xmFPGhaA= +github.com/charmbracelet/fang v0.3.0/go.mod h1:b0ZfEXZeBds0I27/wnTfnv2UVigFDXHhrFNwQztfA0M= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/lipgloss/v2 v2.0.0-beta1 h1:SOylT6+BQzPHEjn15TIzawBPVD0QmhKXbcb3jY0ZIKU= +github.com/charmbracelet/lipgloss/v2 v2.0.0-beta1/go.mod h1:tRlx/Hu0lo/j9viunCN2H+Ze6JrmdjQlXUQvvArgaOc= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= +github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/exp/charmtone v0.0.0-20250603201427-c31516f43444 h1:IJDiTgVE56gkAGfq0lBEloWgkXMk4hl/bmuPoicI4R0= +github.com/charmbracelet/x/exp/charmtone v0.0.0-20250603201427-c31516f43444/go.mod h1:T9jr8CzFpjhFVHjNjKwbAD7KwBNyFnj2pntAO7F2zw0= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= -github.com/cloudflare/circl v1.3.8 h1:j+V8jJt09PoeMFIu2uh5JUyEaIHTXVOHslFoLNAKqwI= -github.com/cloudflare/circl v1.3.8/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= -github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= -github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= +github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k= +github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0= @@ -204,62 +244,75 @@ github.com/dghubble/oauth1 v0.7.3 h1:EkEM/zMDMp3zOsX2DC/ZQ2vnEX3ELK0/l9kb+vs4ptE 
github.com/dghubble/oauth1 v0.7.3/go.mod h1:oxTe+az9NSMIucDPDCCtzJGsPhciJV33xocHfcR2sVY= github.com/dghubble/sling v1.4.0 h1:/n8MRosVTthvMbwlNZgLx579OGVjUOy3GNEv5BIqAWY= github.com/dghubble/sling v1.4.0/go.mod h1:0r40aNsU9EdDUVBNhfCstAtFgutjgJGYbO1oNzkMoM8= +github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v25.0.4+incompatible h1:DatRkJ+nrFoYL2HZUzjM5Z5sAmcA5XGp+AW0oEw2+cA= -github.com/docker/cli v25.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A= +github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= -github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elliotchance/orderedmap/v2 v2.2.0 
h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk= -github.com/elliotchance/orderedmap/v2 v2.2.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/github/smimesign v0.2.0 h1:Hho4YcX5N1I9XNqhq0fNx0Sts8MhLonHd+HRXVGNjvk= github.com/github/smimesign v0.2.0/go.mod h1:iZiiwNT4HbtGRVqCQu7uJPEZCuEE5sfSSttcnePkDl4= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 
v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= -github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= +github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60= +github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k= +github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY= +github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= -github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= -github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= @@ -270,15 +323,16 @@ github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9Z github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= 
github.com/go-restruct/restruct v1.2.0-alpha h1:2Lp474S/9660+SJjpVxoKuWX09JsXHSrdV7Nv3/gkvc= github.com/go-restruct/restruct v1.2.0-alpha/go.mod h1:KqrpKpn4M8OLznErihXTGLlsXFGeLxHUrLRRI/1YjGk= github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 h1:wG8n/XJQ07TmjbITcGiUaOtXxdrINDz1b0J1w0SzqDc= github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8= -github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= +github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -286,14 +340,15 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -304,8 +359,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 
+github.com/google/certificate-transparency-go v1.3.1 h1:akbcTfQg0iZlANZLn0L9xOeWtyCIdeoYhKrqi5iH3Go= +github.com/google/certificate-transparency-go v1.3.1/go.mod h1:gg+UQlx6caKEDQ9EElFOujyxEQEfOiQzAt6782Bvi8k= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -313,73 +368,69 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= -github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= -github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4= -github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= +github.com/google/go-github/v74 v74.0.0 h1:yZcddTUn8DPbj11GxnMrNiAnXH14gNs559AsUpNpPgM= +github.com/google/go-github/v74 v74.0.0/go.mod h1:ubn/YdyftV80VPSI26nSJvaEsTOnsjrxG3o9kJhcyak= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/ko v0.15.4 h1:0blRbIdPmSy6v4LvedGxbI/8krdJYQgbSih3v6Y8V1c= -github.com/google/ko v0.15.4/go.mod h1:ZkcmfV91Xt6ZzOBHc/cXXGYnqWdNWDVy/gHoUU9sjag= +github.com/google/ko v0.18.0 h1:jkF5Fkvm+SMtqTt/SMzsCJO+6hz7FSDE6GRldGn0VVI= +github.com/google/ko v0.18.0/go.mod h1:iR0zT5aR4pINW9tk2Ujj99dBJ7cVy4to9ZirAkGKb9g= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/rpmpack v0.6.1-0.20240329070804-c2247cbb881a h1:JJBdjSfqSy3mnDT0940ASQFghwcZ4y4cb6ttjAoXqwE= -github.com/google/rpmpack v0.6.1-0.20240329070804-c2247cbb881a/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= -github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= +github.com/google/rpmpack v0.7.0 h1:mA2Yd3/dOmao1ype0DJA8DFquEpslaleywOuglVCrUs= +github.com/google/rpmpack v0.7.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962 h1:+9C/TgFfcCmZBV7Fjb3kQCGlkpFrhtvFDgbdQHB9RaA= +github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962/go.mod 
h1:H3K1Iu/utuCfa10JO+GsmKUYSWi7ug57Rk6GaDRHaaQ= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/goreleaser/chglog v0.6.1 h1:NZKiX8l0FTQPRzBgKST7knvNZmZ04f7PEGkN2wInfhE= -github.com/goreleaser/chglog v0.6.1/go.mod h1:Bnnfo07jMZkaAb0uRNASMZyOsX6ROW6X1qbXqN3guUo= +github.com/goreleaser/chglog v0.7.0 h1:/KzXWAeg4DrEz4r3OI6K2Yb8RAsVGeInCUfLWFXL9C0= +github.com/goreleaser/chglog v0.7.0/go.mod h1:2h/yyq9xvTUeM9tOoucBP+jri8Dj28splx+SjlYkklc= github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I= github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU= -github.com/goreleaser/goreleaser v1.26.2 h1:1iY1HaXtRiMTrwy6KE1sNjkRjsjMi+9l0k6WUX8GpWw= -github.com/goreleaser/goreleaser v1.26.2/go.mod h1:mHi6zr6fuuOh5eHdWWgyo/N8BWED5WEVtb/4GETc9jQ= -github.com/goreleaser/nfpm/v2 v2.37.1 h1:RUmeEt8OlEVeSzKRrO5Vl5qVWCtUwx4j9uivGuRo5fw= -github.com/goreleaser/nfpm/v2 v2.37.1/go.mod h1:q8+sZXFqn106/eGw+9V+I8+izFxZ/sJjrhwmEUxXhUg= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/goreleaser/goreleaser/v2 v2.11.2 h1:Od6dcPI5r8IWVPnJYz6wYe3rML1qf80fLzXB1Ix6ZnY= +github.com/goreleaser/goreleaser/v2 v2.11.2/go.mod h1:NSsia+m49thkd/pX9Rz7Cq1KE8HDGrLJVoPLjFeAV/4= +github.com/goreleaser/nfpm/v2 v2.43.0 h1:o5oureuZkhu55RK0M9WSN8JLW7hu6MymtMh7LypInlk= +github.com/goreleaser/nfpm/v2 v2.43.0/go.mod h1:f//PE8PjNHjaPCbd7Jkok+aPKdLTrzM+fuIWg3PfVRg= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= -github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= -github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI= +github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= -github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= 
github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= @@ -410,9 +461,10 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7 h1:FWpSWRD8FbVkKQu8M1DM9jF5oXFLyE+XpisIYfdzbic= +github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7/go.mod h1:BMxO138bOokdgt4UaxZiEfypcSHX0t6SIFimVP1oRfk= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -421,8 +473,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= @@ -432,46 +484,40 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s= -github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc= +github.com/letsencrypt/boulder v0.0.0-20250411005613-d800055fe666 h1:ndfLOJNaxu0fX358UKxtq2bU8IMASWi87Hn0Nv/TIoY= +github.com/letsencrypt/boulder v0.0.0-20250411005613-d800055fe666/go.mod h1:WGXwLq/jKt0kng727wv6a0h0q7TVC+MwS2S75rcqL+4= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= 
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis= +github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= -github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= -github.com/mattn/go-mastodon v0.0.8 h1:UgKs4SmQ5JeawxMIPP7NQ9xncmOXA+5q6jYk4erR7vk= -github.com/mattn/go-mastodon v0.0.8/go.mod h1:8YkqetHoAVEktRkK15qeiv/aaIMfJ/Gc89etisPZtHU= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 h1:P8UmIzZMYDR+NGImiFvErt6VWfIRPuGM+vyjiEdkmIw= +github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= +github.com/mattn/go-mastodon v0.0.10 h1:wz1d/aCkJOIkz46iv4eAqXHVreUMxydY1xBWrPBdDeE= +github.com/mattn/go-mastodon v0.0.10/go.mod h1:YBofeqh7G6s787787NQR8erBYz6fKDu+KNMrn5RuD6Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= 
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= -github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70 h1:kMlmsLSbjkikxQJ1IPwaM+7LJ9ltFu/fi8CRzvSnQmA= -github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= github.com/muesli/mango v0.1.0 h1:DZQK45d2gGbql1arsYA4vfg4d7I9Hfx5rX/GCmzsAvI= @@ -480,13 +526,10 @@ github.com/muesli/mango-cobra v1.2.0 h1:DQvjzAM0PMZr85Iv9LIMaYISpTOliMEg+uMFtNbY github.com/muesli/mango-cobra v1.2.0/go.mod h1:vMJL54QytZAJhCT13LPVDfkvCUJ5/4jNUKF/8NC2UjA= github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg= github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0= -github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= -github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8= github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig= -github.com/muesli/termenv v0.11.1-0.20220212125758-44cd13922739/go.mod h1:Bd5NYQ7pd+SrtBSrSNoBBmXlcY8+Xj4BMJgh8qcZrvs= -github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= @@ -501,27 +544,30 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 
h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pborman/getopt v0.0.0-20180811024354-2b5b3bfb099b/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f h1:VXTQfuJj9vKR4TCkEuWIckKvdHFeJH/huIFJ9/cXOB0= github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -529,113 +575,143 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod 
h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= +github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e h1:7q6NSFZDeGfvvtIRwBrU/aegEYJYmvev0cHAwo17zZQ= github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro= -github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y= -github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= -github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4= -github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= +github.com/sigstore/cosign/v2 v2.5.0 h1:1aRfPgRQHHlODI3Mvs/JkPBS9dJT9bRLCuHZgnHxFt8= +github.com/sigstore/cosign/v2 v2.5.0/go.mod h1:2V2hmo+jjFNnDb5Q5VL6PXvLU9Vujio7T5yldrpNTRw= +github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= +github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.9 h1:sUjRpKVh/hhgqGMs0t+TubgYsksArZ6poLEC3MsGAzU= +github.com/sigstore/rekor v1.3.9/go.mod h1:xThNUhm6eNEmkJ/SiU/FVU7pLY2f380fSDZFsdDWlcM= +github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ= +github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ= +github.com/sigstore/sigstore-go v0.7.1 
h1:lyzi3AjO6+BHc5zCf9fniycqPYOt3RaC08M/FRmQhVY= +github.com/sigstore/sigstore-go v0.7.1/go.mod h1:AIRj4I3LC82qd07VFm3T2zXYiddxeBV1k/eoS8nTz0E= +github.com/sigstore/timestamp-authority v1.2.5 h1:W22JmwRv1Salr/NFFuP7iJuhytcZszQjldoB8GiEdnw= +github.com/sigstore/timestamp-authority v1.2.5/go.mod h1:gWPKWq4HMWgPCETre0AakgBzcr9DRqHrsgbrRqsigOs= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= -github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/slack-go/slack v0.13.0 h1:7my/pR2ubZJ9912p9FtvALYpbt0cQPAqkRy2jaSI1PQ= -github.com/slack-go/slack v0.13.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= +github.com/slack-go/slack v0.17.3 h1:zV5qO3Q+WJAQ/XwbGfNFrRMaJ5T/naqaonyPV/1TP4g= +github.com/slack-go/slack v0.17.3/go.mod h1:X+UqOufi3LYQHDnMG1vxf0J8asC6+WllXrVrhl8/Prk= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod 
h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.0.2 h1:PyNnjV9BJNzN1ZE6BcWK+5JbF+if370jjzO84SS+Ebo= +github.com/theupdateframework/go-tuf/v2 v2.0.2/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 h1:jIVmlAFIqV3d+DOxazTR9v+zgj8+VYuQBzPgBZvWBHA= github.com/wagoodman/go-partybus 
v0.0.0-20230516145632-8ccac152c651/go.mod h1:b26F2tHLqaoRQf8DywqzVaV1MQ9yvjb0OMcNl7Nxu20= github.com/wagoodman/go-progress v0.0.0-20220614130704-4b1c25a33c7c h1:gFwUKtkv6QzQsFdIjvPqd0Qdw42DHUEbbUdiUTI1uco= github.com/wagoodman/go-progress v0.0.0-20220614130704-4b1c25a33c7c/go.mod h1:jLXFoL31zFaHKAAyZUh+sxiTDFe1L1ZHrcK2T1itVKA= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.1.1-0.20240311221002-68b9f235c302 h1:MhInbXe4SzcImAKktUvWBCWZgcw6MYf5NfumTj1BhAw= -github.com/whyrusleeping/cbor-gen v0.1.1-0.20240311221002-68b9f235c302/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.1.3-0.20240731173018-74d74643234c h1:Jmc9fHbd0LKFmS5CkLgczNUyW36UbiyvbHCG9xCTyiw= +github.com/whyrusleeping/cbor-gen v0.1.3-0.20240731173018-74d74643234c/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/xanzy/go-gitlab v0.105.0 h1:3nyLq0ESez0crcaM19o5S//SvezOQguuIHZ3wgX64hM= -github.com/xanzy/go-gitlab v0.105.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= gitlab.com/digitalxero/go-conventional-commit v1.0.7 h1:8/dO6WWG+98PMhlZowt/YjuiKhqhGlOCwlIV8SqqGh8= gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0= -go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= -go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +gitlab.com/gitlab-org/api/client-go v0.137.0 h1:H26yL44qnb38Czl20pEINCJrcj63W6/BX8iKPVUKQP0= +gitlab.com/gitlab-org/api/client-go v0.137.0/go.mod h1:AcAYES3lfkIS4zhso04S/wyUaWQmDYve2Fd9AF7C6qc= +go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= +go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod 
h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod 
h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -646,8 +722,8 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= -gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= +gocloud.dev v0.42.0 h1:qzG+9ItUL3RPB62/Amugws28n+4vGZXEoJEAMfjutzw= +gocloud.dev v0.42.0/go.mod h1:zkaYAapZfQisXOA4bzhsbA4ckiStGQ3Psvs9/OQ5dPM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -655,20 +731,16 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -681,8 
+753,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -696,17 +768,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -717,8 +787,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod 
h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -731,45 +801,40 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod 
h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -787,34 +852,34 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= -google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/api v0.235.0 
h1:C3MkpQSRxS1Jy6AkzTGKKrpSCOd2WOGrezZ+icKSkKo= +google.golang.org/api v0.235.0/go.mod h1:QpeJkemzkFKe5VCE/PMv7GsUfn9ZF+u+q1Q7w6ckxTg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -824,18 +889,14 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= -gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= @@ -843,7 +904,6 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -852,11 +912,13 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -sigs.k8s.io/kind v0.23.0 h1:8fyDGWbWTeCcCTwA04v4Nfr45KKxbSPH1WO9K+jVrBg= -sigs.k8s.io/kind v0.23.0/go.mod h1:ZQ1iZuJLh3T+O8fzhdi3VWcFTzsdXtNv2ppsHc8JQ7s= +sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= +sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod 
h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +software.sslmate.com/src/go-pkcs12 v0.5.0 h1:EC6R394xgENTpZ4RltKydeDUjtlM5drOYIG9c6TVj2M= +software.sslmate.com/src/go-pkcs12 v0.5.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/.bingo/kube-score.mod b/.bingo/kube-score.mod new file mode 100644 index 000000000..873a8ecb7 --- /dev/null +++ b/.bingo/kube-score.mod @@ -0,0 +1,5 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT + +go 1.24.6 + +require github.com/zegl/kube-score v1.20.0 // cmd/kube-score diff --git a/.bingo/kube-score.sum b/.bingo/kube-score.sum new file mode 100644 index 000000000..9a4cabc8e --- /dev/null +++ b/.bingo/kube-score.sum @@ -0,0 +1,98 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eidolon/wordwrap v0.0.0-20161011182207-e0f54129b8bb h1:ioQwBmKdOCpMVS/bDaESqNWXIE/aw4+gsVtysCGMWZ4= +github.com/eidolon/wordwrap v0.0.0-20161011182207-e0f54129b8bb/go.mod h1:ZAPs+OyRzeVJFGvXVDVffgCzQfjg3qU9Ig8G/MU3zZ4= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report/v2 v2.1.0 h1:X3+hPYlSczH9IMIpSC9CQSZA0L+BipYafciZUWHEmsc= +github.com/jstemmer/go-junit-report/v2 v2.1.0/go.mod h1:mgHVr7VUo5Tn8OLVr1cKnLuEy0M92wdRntM99h7RkgQ= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zegl/kube-score v1.20.0 h1:J1VqK86SunV4Gg8emPTmwUVxe0rmXnAs5K9ZUbGMKR8= +github.com/zegl/kube-score v1.20.0/go.mod h1:mBOw3S3g7TBG/GziT8xNG15dCFn54/jUeEHndxLinE8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.31.0 
h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/.bingo/variables.env b/.bingo/variables.env index fc3a980e0..52133be6a 100644 --- a/.bingo/variables.env +++ b/.bingo/variables.env @@ -10,6 +10,8 @@ fi BINGO="${GOBIN}/bingo-v0.9.0" +CONFTEST="${GOBIN}/conftest-v0.62.0" + CONTROLLER_GEN="${GOBIN}/controller-gen-v0.19.0" CRD_DIFF="${GOBIN}/crd-diff-v0.5.0" @@ -18,14 +20,16 @@ CRD_REF_DOCS="${GOBIN}/crd-ref-docs-v0.2.0" GOJQ="${GOBIN}/gojq-v0.12.17" -GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.6.2" +GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.7.2" -GORELEASER="${GOBIN}/goreleaser-v1.26.2" 
+GORELEASER="${GOBIN}/goreleaser-v2.11.2" HELM="${GOBIN}/helm-v3.18.4" KIND="${GOBIN}/kind-v0.30.0" +KUBE_SCORE="${GOBIN}/kube-score-v1.20.0" + KUSTOMIZE="${GOBIN}/kustomize-v5.7.1" OPERATOR_SDK="${GOBIN}/operator-sdk-v1.41.1" diff --git a/.claude/commands/api-lint-diff.md b/.claude/commands/api-lint-diff.md new file mode 100644 index 000000000..f9d102d1e --- /dev/null +++ b/.claude/commands/api-lint-diff.md @@ -0,0 +1,184 @@ +--- +description: Validate API issues using kube-api-linter with diff-aware analysis +--- + +# API Lint Diff + +Validates API issues in the `api/` directory using kube-api-linter with diff-aware analysis that distinguishes between NEW and PRE-EXISTING issues. + +## Instructions for Claude AI + +When this command is invoked, you MUST: + +**CRITICAL:** The final output MUST be a comprehensive analysis report displayed directly in the conversation for the user to read. Do NOT just create temp files - output the full report as your response. + +1. **Execute the shell script** + ```bash + bash hack/api-lint-diff/run.sh + ``` + +2. **Understand the shell script's output**: + - **False positives (IGNORED)**: Standard CRD scaffolding patterns that kube-api-linter incorrectly flags + - **NEW issues (ERRORS)**: Introduced in current branch → MUST fix + - **PRE-EXISTING issues (WARNINGS)**: Existed before changes → Can fix separately + +3. **Filter false positives** - Operator projects scaffold standard Kubernetes CRD patterns that kube-api-linter incorrectly flags as errors, even with WhenRequired configuration. + + **Scenario 1: optionalfields on Status field** + ```go + Status MyResourceStatus `json:"status,omitzero"` + ``` + **Error reported:** + ``` + optionalfields: field Status has a valid zero value ({}), but the validation + is not complete (e.g. min properties/adding required fields). The field should + be a pointer to allow the zero value to be set. If the zero value is not a + valid use case, complete the validation and remove the pointer. + ``` + **Why it's a FALSE POSITIVE:** + - Status is NEVER a pointer in any Kubernetes API + - Works correctly with `omitzero` tag + - Validation incompleteness is expected - Status is controller-managed, not user-provided + - **ACTION: IGNORE this error** + + **Scenario 2: requiredfields on Spec field** + ```go + Spec MyResourceSpec `json:"spec"` + ``` + **Error reported:** + ``` + requiredfields: field Spec has a valid zero value ({}), but the validation is + not complete (e.g. min properties/adding required fields). The field should be + a pointer to allow the zero value to be set. If the zero value is not a valid + use case, complete the validation and remove the pointer. + ``` + **Why it's a FALSE POSITIVE:** + - Spec is NEVER a pointer in Kubernetes APIs + - Scaffolds are starting points - users add validation when they implement their business logic + - **ACTION: IGNORE this error** + + **Scenario 3: conditions markers on metav1.Condition** + ```go + Conditions []metav1.Condition `json:"conditions,omitempty"` + ``` + **Error reported:** + ``` + conditions: Conditions field is missing the following markers: + patchStrategy=merge, patchMergeKey=type + ``` + **Why it's a FALSE POSITIVE:** + - `metav1.Condition` already handles patches correctly + - Adding these markers is redundant for this standard Kubernetes type + - **ACTION: IGNORE this error** + +4. **For reported issues, provide intelligent analysis**: + + a.
**Categorize by fix complexity**: + - NON-BREAKING: Marker replacements, adding listType, adding +required/+optional + - BREAKING: Pointer conversions, type changes (int→int32) + + b. **Search for actual usage** (REQUIRED FOR ALL ISSUES - NOT OPTIONAL): + - **CRITICAL:** Do NOT just look at JSON tags - analyze actual code usage patterns + - **Exception:** Deprecated marker replacements (`+kubebuilder:validation:Required` → `+required`) are straightforward - no usage analysis needed + - **For all other issues:** MUST analyze actual usage before making recommendations + - Use grep to find ALL occurrences where each field is: + * **Read/accessed**: `obj.Spec.FieldName`, `cat.Spec.Priority` + * **Written/set**: `obj.Spec.FieldName = value` + * **Checked for zero/nil**: `if obj.Spec.FieldName == ""`, `if ptr != nil` + * **Used in conditionals**: Understand semantic meaning of zero values + - Search in: controllers, reconcilers, internal packages, tests, examples + - **Smart assessment based on usage patterns**: + * Field ALWAYS set in code? → Should be **required**, no omitempty + * Field SOMETIMES set? → Should be **optional** with omitempty + * Code checks `if field == zero`? → May need **pointer** to distinguish zero vs unset + * Zero value semantically valid? → Keep as value type with omitempty + * Zero value semantically invalid? → Use pointer OR mark required + * Field never read but only set by controller? → Likely Status field + - **Example analysis workflow for a field**: + ``` + 1. Grep for field usage: `CatalogFilter.Version` + 2. Found 5 occurrences: + - controllers/extension.go:123: if filter.Version != "" { ... } + - controllers/extension.go:456: result.Version = bundle.Version + - tests/filter_test.go:89: Version: "1.2.3" + 3. Analysis: Version is checked for empty, sometimes set, sometimes omitted + 4. Recommendation: Optional with omitempty (current usage supports this) + ``` + + c. **Generate EXACT code fixes** grouped by file: + - Show current code + - Show replacement code, ready to copy and paste + - **Explain why based on actual usage analysis** (not just JSON tags): + * Include usage summary: "Found N occurrences" + * Cite specific examples: "Used in resolve/catalog.go:163 as direct int32" + * Explain semantic meaning: "Field distinguishes priority 0 vs unset" + * Justify recommendation: "Since code checks for empty, should be optional" + - Note breaking change impact with reasoning + - **Each fix MUST include evidence from code usage** + + d. **Prioritize recommendations**: + - NEW issues first (must fix) + - Group PRE-EXISTING by NON-BREAKING vs BREAKING + +5. 
**Present actionable report directly to user**: + - **IMPORTANT:** Output the full comprehensive analysis in the conversation (not just to a temp file) + - Summary: False positives filtered, NEW count, PRE-EXISTING count + - Group issues by file and fix type + - Provide code snippets ready to apply (current code → fixed code) + - **DO NOT include "Next Steps" or "Conclusion" sections** - just present the analysis + + **Report Structure:** + ``` + # API Lint Diff Analysis Report + + **Generated:** [date] + **Baseline:** main branch + **Current:** [branch name] + **Status:** [status icon and message based on logic below] + + **Status Logic:** + - ✅ PASSED: 0 real issues (after filtering false positives) + - ⚠️ WARN: 0 new issues but has pre-existing issues + - ❌ FAIL: Has new issues that must be fixed + + ## Executive Summary + - Total issues: X + - False positives (IGNORED): Y + - Real issues (NEED FIXING): Z + - NEW issues: N + - PRE-EXISTING issues: P + + ## REAL ISSUES - FIXES NEEDED (Z issues) + + ### Category 1: [Issue Type] (N issues) - [BREAKING/NON-BREAKING] + + #### File: [filename] + + **[Issue #]. Line X - [Field Name]** + ```go + // CURRENT: + [current code] + + // FIX: + [fixed code] + ``` + **Usage Analysis:** + - Found N occurrences in codebase + - [Specific usage example 1]: path/file.go:123 + - [Specific usage example 2]: path/file.go:456 + - Pattern: [always set / sometimes set / checked for zero / etc.] + + **Why:** [Recommendation based on usage analysis with evidence] + **Breaking:** [YES/NO] ([detailed reason with impact]) + + [Repeat for all issues] + + ## Summary of Breaking Changes + [Table of breaking changes if any] + ``` + +## Related Documentation + +- [Kubernetes API Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md) +- [kube-api-linter](https://github.com/kubernetes-sigs/kube-api-linter) +- AGENTS.md in this repository for understanding operator patterns diff --git a/.goreleaser.yml b/.goreleaser.yml index 720014214..e2807ca41 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,3 +1,4 @@ +version: 2 before: hooks: - go mod tidy @@ -116,7 +117,7 @@ docker_manifests: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ incpatch .Version }}-next" + version_template: "{{ incpatch .Version }}-next" changelog: use: github-native disable: '{{ ne .Env.ENABLE_RELEASE_PIPELINE "true" }}' diff --git a/.tilt-support b/.tilt-support index 9cb01b152..dcd827d04 100644 --- a/.tilt-support +++ b/.tilt-support @@ -14,7 +14,7 @@ def deploy_cert_manager_if_needed(): docker_build( ref='helper', context='.', - build_args={'GO_VERSION': '1.24'}, + build_args={'GO_VERSION': '1.25'}, dockerfile_contents=''' ARG GO_VERSION FROM golang:${GO_VERSION} diff --git a/Makefile b/Makefile index 17025fd9f..7b422471a 100644 --- a/Makefile +++ b/Makefile @@ -62,6 +62,9 @@ ifeq ($(origin KIND_CLUSTER_NAME), undefined) KIND_CLUSTER_NAME := operator-controller endif +ifeq ($(origin KIND_CONFIG), undefined) +KIND_CONFIG := ./kind-config/kind-config.yaml +endif ifneq (, $(shell command -v docker 2>/dev/null)) CONTAINER_RUNTIME := docker @@ -118,9 +121,25 @@ help-extended: #HELP Display extended help. lint: lint-custom $(GOLANGCI_LINT) #HELP Run golangci linter.
$(GOLANGCI_LINT) run --build-tags $(GO_BUILD_TAGS) $(GOLANGCI_LINT_ARGS) -lint-helm: $(HELM) #HELP Run helm linter +.PHONY: lint-helm +lint-helm: $(HELM) $(CONFTEST) #HELP Run helm linter helm lint helm/olmv1 helm lint helm/prometheus + (helm template olmv1 helm/olmv1; helm template prometheus helm/prometheus) | $(CONFTEST) test --policy hack/conftest/policy/ --combine -n main -n prometheus - + +.PHONY: lint-deployed-resources +lint-deployed-resources: $(KUBE_SCORE) #HELP Lint deployed resources. + (for ns in $$(printf "olmv1-system\n%s\n" "$(CATD_NAMESPACE)" | uniq); do \ + for resource in $$(kubectl api-resources --verbs=list --namespaced -o name); do \ + kubectl get $$resource -n $$ns -o yaml ; \ + echo "---" ; \ + done \ + done) | $(KUBE_SCORE) score - \ + `# TODO: currently these checks are failing, decide if resources should be fixed for them to pass (https://github.com/operator-framework/operator-controller/issues/2398)` \ + --ignore-test container-resources \ + --ignore-test container-image-pull-policy \ + --ignore-test container-ephemeral-storage-request-and-limit \ + --ignore-test container-security-context-user-group-id .PHONY: custom-linter-build custom-linter-build: #EXHELP Build custom linter @@ -161,9 +180,10 @@ $(EXPERIMENTAL_MANIFEST) ?= helm/cert-manager.yaml helm/experimental.yaml $(EXPERIMENTAL_E2E_MANIFEST) ?= helm/cert-manager.yaml helm/experimental.yaml helm/e2e.yaml HELM_SETTINGS ?= .PHONY: $(MANIFESTS) -$(MANIFESTS): $(HELM) +$(MANIFESTS): $(HELM) $(CONFTEST) @mkdir -p $(MANIFEST_HOME) $(HELM) template olmv1 helm/olmv1 $(addprefix --values ,$($@)) $(addprefix --set ,$(HELM_SETTINGS)) > $@ + $(CONFTEST) test --policy hack/conftest/policy/ -n main --combine $@ # Generate manifests stored in source-control .PHONY: manifests @@ -215,7 +235,7 @@ test: manifests generate fmt lint test-unit test-e2e test-regression #HELP Run a .PHONY: e2e e2e: #EXHELP Run the e2e tests. - go test -count=1 -v ./test/e2e/... + go test -count=1 -v ./test/e2e/features_test.go E2E_REGISTRY_NAME := docker-registry E2E_REGISTRY_NAMESPACE := operator-controller-e2e @@ -266,7 +286,7 @@ image-registry: ## Build the testdata catalog used for e2e tests and push it to # or inject unintended characters into the binary (e.g., version metadata). 
go build $(GO_BUILD_FLAGS) $(GO_BUILD_EXTRA_FLAGS) -tags '$(GO_BUILD_TAGS)' -ldflags "$(GO_BUILD_LDFLAGS)" -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o ./testdata/push/bin/push ./testdata/push/push.go $(CONTAINER_RUNTIME) build -f ./testdata/Dockerfile -t $(E2E_REGISTRY_IMAGE) ./testdata - $(CONTAINER_RUNTIME) save $(E2E_REGISTRY_IMAGE) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME) + $(KIND) load docker-image $(E2E_REGISTRY_IMAGE) --name $(KIND_CLUSTER_NAME) ./testdata/build-test-registry.sh $(E2E_REGISTRY_NAMESPACE) $(E2E_REGISTRY_NAME) $(E2E_REGISTRY_IMAGE) # When running the e2e suite, you can set the ARTIFACT_PATH variable to the absolute path @@ -285,6 +305,7 @@ test-e2e: run-internal image-registry prometheus e2e e2e-coverage kind-clean #HE .PHONY: test-experimental-e2e test-experimental-e2e: SOURCE_MANIFEST := $(EXPERIMENTAL_E2E_MANIFEST) test-experimental-e2e: KIND_CLUSTER_NAME := operator-controller-e2e +test-experimental-e2e: KIND_CONFIG := ./kind-config/kind-config-2node.yaml test-experimental-e2e: GO_BUILD_EXTRA_FLAGS := -cover test-experimental-e2e: COVERAGE_NAME := experimental-e2e test-experimental-e2e: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST) @@ -385,8 +406,8 @@ stop-profiling: build-test-profiler #EXHELP Stop profiling and generate analysis .PHONY: kind-load kind-load: $(KIND) #EXHELP Loads the currently constructed images into the KIND cluster. - $(CONTAINER_RUNTIME) save $(OPCON_IMG) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME) - $(CONTAINER_RUNTIME) save $(CATD_IMG) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME) + $(KIND) load docker-image $(OPCON_IMG) --name $(KIND_CLUSTER_NAME) + $(KIND) load docker-image $(CATD_IMG) --name $(KIND_CLUSTER_NAME) .PHONY: kind-deploy kind-deploy: export DEFAULT_CATALOG := $(RELEASE_CATALOGS) @@ -411,8 +432,9 @@ kind-deploy-experimental: manifests .PHONY: kind-cluster kind-cluster: $(KIND) kind-verify-versions #EXHELP Standup a kind cluster. -$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) - $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config ./kind-config.yaml + $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config $(KIND_CONFIG) $(KIND) export kubeconfig --name $(KIND_CLUSTER_NAME) + kubectl wait --for=condition=Ready nodes --all --timeout=2m .PHONY: kind-clean kind-clean: $(KIND) #EXHELP Delete the kind cluster. @@ -475,7 +497,7 @@ go-build-linux: export GOARCH=amd64 go-build-linux: $(BINARIES) .PHONY: run-internal -run-internal: docker-build kind-cluster kind-load kind-deploy wait +run-internal: docker-build kind-cluster kind-load kind-deploy lint-deployed-resources wait .PHONY: run run: SOURCE_MANIFEST := $(STANDARD_MANIFEST) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 3d1381689..065665c2b 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -1,5 +1,6 @@ aliases: olmv1-approvers: + - camilamacedo86 - grokspawn - joelanford - kevinrizza diff --git a/api/v1/clustercatalog_types.go b/api/v1/clustercatalog_types.go index c18fa3c7e..8df90a806 100644 --- a/api/v1/clustercatalog_types.go +++ b/api/v1/clustercatalog_types.go @@ -51,7 +51,7 @@ const ( //+kubebuilder:printcolumn:name="Serving",type=string,JSONPath=`.status.conditions[?(@.type=="Serving")].status` //+kubebuilder:printcolumn:name=Age,type=date,JSONPath=`.metadata.creationTimestamp` -// ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. 
+// ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. // For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs type ClusterCatalog struct { metav1.TypeMeta `json:",inline"` @@ -60,16 +60,14 @@ type ClusterCatalog struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // spec is the desired state of the ClusterCatalog. - // spec is required. - // The controller will work to ensure that the desired - // catalog is unpacked and served over the catalog content HTTP server. + // spec is a required field that defines the desired state of the ClusterCatalog. + // The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. // +kubebuilder:validation:Required Spec ClusterCatalogSpec `json:"spec"` - // status contains information about the state of the ClusterCatalog such as: - // - Whether or not the catalog contents are being served via the catalog content HTTP server - // - Whether or not the ClusterCatalog is progressing to a new state + // status contains the following information about the state of the ClusterCatalog: + // - Whether the catalog contents are being served via the catalog content HTTP server + // - Whether the ClusterCatalog is progressing to a new state // - A reference to the source from which the catalog contents were retrieved // +optional Status ClusterCatalogStatus `json:"status,omitempty"` @@ -93,15 +91,12 @@ type ClusterCatalogList struct { // ClusterCatalogSpec defines the desired state of ClusterCatalog type ClusterCatalogSpec struct { - // source allows a user to define the source of a catalog. - // A "catalog" contains information on content that can be installed on a cluster. - // Providing a catalog source makes the contents of the catalog discoverable and usable by - // other on-cluster components. - // These on-cluster components may do a variety of things with this information, such as - // presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + // source is a required field that defines the source of a catalog. + // A catalog contains information on content that can be installed on a cluster. + // The catalog source makes catalog contents discoverable and usable by other on-cluster components. + // These components can present the content in a GUI dashboard or install content from the catalog on the cluster. // The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. // For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - // source is a required field. // // Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: // @@ -113,19 +108,18 @@ type ClusterCatalogSpec struct { // +kubebuilder:validation:Required Source CatalogSource `json:"source"` - // priority allows the user to define a priority for a ClusterCatalog. - // priority is optional. + // priority is an optional field that defines a priority for this ClusterCatalog. // - // A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - // A higher number means higher priority. + // Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + // Higher numbers mean higher priority. 
// - // It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - // When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + // Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + // Clients should prompt users for additional input to break the tie. // - // When omitted, the default priority is 0 because that is the zero value of integers. + // When omitted, the default priority is 0. // - // Negative numbers can be used to specify a priority lower than the default. - // Positive numbers can be used to specify a priority higher than the default. + // Use negative numbers to specify a priority lower than the default. + // Use positive numbers to specify a priority higher than the default. // // The lowest possible value is -2147483648. // The highest possible value is 2147483647. @@ -136,21 +130,18 @@ type ClusterCatalogSpec struct { // +optional Priority int32 `json:"priority"` - // availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - // availabilityMode is optional. + // availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. // - // Allowed values are "Available" and "Unavailable" and omitted. + // Allowed values are "Available", "Unavailable", or omitted. // // When omitted, the default value is "Available". // - // When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - // Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - // and its contents as usable. + // When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + // Clients should consider this ClusterCatalog and its contents as usable. // - // When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - // When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - // Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - // to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + // When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + // Treat this the same as if the ClusterCatalog does not exist. + // Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. // // +kubebuilder:validation:Enum:="Unavailable";"Available" // +kubebuilder:default:="Available" @@ -160,24 +151,23 @@ type ClusterCatalogSpec struct { // ClusterCatalogStatus defines the observed state of ClusterCatalog type ClusterCatalogStatus struct { - // conditions is a representation of the current state for this ClusterCatalog. + // conditions represents the current state of this ClusterCatalog. // // The current condition types are Serving and Progressing. // - // The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - // When it has a status of True and a reason of Available, the contents of the catalog are being served. 
- // When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - // When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + // The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + // - When status is True and reason is Available, the catalog contents are being served. + // - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + // - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. // - // The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - // When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - // When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - // When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + // The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + // - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + // - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + // - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. // - // In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - // catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - // contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - // to the contents we identify that there are updates to the contents. + // If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + // - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + // - The Progressing condition is True with reason Retrying because the system is working to serve the new version. // // +listType=map // +listMapKey=type @@ -189,30 +179,25 @@ type ClusterCatalogStatus struct { // urls contains the URLs that can be used to access the catalog. // +optional URLs *ClusterCatalogURLs `json:"urls,omitempty"` - // lastUnpacked represents the last time the contents of the - // catalog were extracted from their source format. As an example, - // when using an Image source, the OCI image will be pulled and the - // image layers written to a file-system backed cache. We refer to the - // act of this extraction from the source format as "unpacking". 
+ // lastUnpacked represents the last time the catalog contents were extracted from their source format. + // For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + // This extraction from the source format is called "unpacking". // +optional LastUnpacked *metav1.Time `json:"lastUnpacked,omitempty"` } // ClusterCatalogURLs contains the URLs that can be used to access the catalog. type ClusterCatalogURLs struct { - // base is a cluster-internal URL that provides endpoints for - // accessing the content of the catalog. + // base is a cluster-internal URL that provides endpoints for accessing the catalog content. // - // It is expected that clients append the path for the endpoint they wish - // to access. + // Clients should append the path for the endpoint they want to access. // - // Currently, only a single endpoint is served and is accessible at the path - // /api/v1. + // Currently, only a single endpoint is served and is accessible at the path /api/v1. // // The endpoints served for the v1 API are: - // - /all - this endpoint returns the entirety of the catalog contents in the FBC format + // - /all - this endpoint returns the entire catalog contents in the FBC format // - // As the needs of users and clients of the evolve, new endpoints may be added. + // New endpoints may be added as needs evolve. // // +kubebuilder:validation:Required // +kubebuilder:validation:MaxLength:=525 @@ -226,20 +211,19 @@ type ClusterCatalogURLs struct { // +union // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise" type CatalogSource struct { - // type is a reference to the type of source the catalog is sourced from. - // type is required. + // type is a required field that specifies the type of source for the catalog. // // The only allowed value is "Image". // - // When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + // When set to "Image", the ClusterCatalog content is sourced from an OCI image. // When using an image source, the image field must be set and must be the only field defined for this type. // // +unionDiscriminator // +kubebuilder:validation:Enum:="Image" // +kubebuilder:validation:Required Type SourceType `json:"type"` - // image is used to configure how catalog contents are sourced from an OCI image. - // This field is required when type is Image, and forbidden otherwise. + // image configures how catalog contents are sourced from an OCI image. + // It is required when type is Image, and forbidden otherwise. // +optional Image *ImageSource `json:"image,omitempty"` } @@ -249,27 +233,26 @@ type CatalogSource struct { // +union // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise" type ResolvedCatalogSource struct { - // type is a reference to the type of source the catalog is sourced from. - // type is required. + // type is a required field that specifies the type of source for the catalog. // // The only allowed value is "Image". // - // When set to "Image", information about the resolved image source will be set in the 'image' field. + // When set to "Image", information about the resolved image source is set in the image field. 
// // +unionDiscriminator // +kubebuilder:validation:Enum:="Image" // +kubebuilder:validation:Required Type SourceType `json:"type"` - // image is a field containing resolution information for a catalog sourced from an image. - // This field must be set when type is Image, and forbidden otherwise. + // image contains resolution information for a catalog sourced from an image. + // It must be set when type is Image, and forbidden otherwise. Image *ResolvedImageSource `json:"image"` } // ResolvedImageSource provides information about the resolved source of a Catalog sourced from an image. type ResolvedImageSource struct { // ref contains the resolved image digest-based reference. - // The digest format is used so users can use other tooling to fetch the exact - // OCI manifests that were used to extract the catalog contents. + // The digest format allows you to use other tooling to fetch the exact OCI manifests + // that were used to extract the catalog contents. // +kubebuilder:validation:Required // +kubebuilder:validation:MaxLength:=1000 // +kubebuilder:validation:XValidation:rule="self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\\\b')",message="must start with a valid domain. valid domains must be alphanumeric characters (lowercase and uppercase) separated by the \".\" character." @@ -287,11 +270,10 @@ type ResolvedImageSource struct { // reject the resource since there is no use in polling a digest-based image reference. // +kubebuilder:validation:XValidation:rule="self.ref.find('(@.*:)') != \"\" ? !has(self.pollIntervalMinutes) : true",message="cannot specify pollIntervalMinutes while using digest-based image" type ImageSource struct { - // ref allows users to define the reference to a container image containing Catalog contents. - // ref is required. - // ref can not be more than 1000 characters. + // ref is a required field that defines the reference to a container image containing catalog contents. + // It cannot be more than 1000 characters. // - // A reference can be broken down into 3 parts - the domain, name, and identifier. + // A reference has 3 parts: the domain, name, and identifier. // // The domain is typically the registry where an image is located. // It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -337,11 +319,10 @@ type ImageSource struct { // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find(':.*$').matches(':[0-9A-Fa-f]*$') : true",message="digest is not valid. the encoded string must only contain hex characters (A-F, a-f, 0-9)" Ref string `json:"ref"` - // pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - // pollIntervalMinutes is optional. - // pollIntervalMinutes can not be specified when ref is a digest-based reference. + // pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + // You cannot specify pollIntervalMinutes when ref is a digest-based reference. // - // When omitted, the image will not be polled for new content. + // When omitted, the image is not polled for new content. 
// +kubebuilder:validation:Minimum:=1 // +optional PollIntervalMinutes *int `json:"pollIntervalMinutes,omitempty"` diff --git a/api/v1/clusterextension_types.go b/api/v1/clusterextension_types.go index fb82b7a23..f098d2220 100644 --- a/api/v1/clusterextension_types.go +++ b/api/v1/clusterextension_types.go @@ -48,16 +48,15 @@ const ( // ClusterExtensionSpec defines the desired state of ClusterExtension type ClusterExtensionSpec struct { - // namespace is a reference to a Kubernetes namespace. - // This is the namespace in which the provided ServiceAccount must exist. - // It also designates the default namespace where namespace-scoped resources - // for the extension are applied to the cluster. + // namespace specifies a Kubernetes namespace. + // This is the namespace where the provided ServiceAccount must exist. + // It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. // Some extensions may contain namespace-scoped resources to be applied in other namespaces. // This namespace must exist. // - // namespace is required, immutable, and follows the DNS label standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - // start and end with an alphanumeric character, and be no longer than 63 characters + // The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + // and be no longer than 63 characters. // // [RFC 1123]: https://tools.ietf.org/html/rfc1123 // @@ -67,20 +66,20 @@ type ClusterExtensionSpec struct { // +kubebuilder:validation:Required Namespace string `json:"namespace"` - // serviceAccount is a reference to a ServiceAccount used to perform all interactions - // with the cluster that are required to manage the extension. + // serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + // that are required to manage the extension. // The ServiceAccount must be configured with the necessary permissions to perform these interactions. // The ServiceAccount must exist in the namespace referenced in the spec. - // serviceAccount is required. + // The serviceAccount field is required. // // +kubebuilder:validation:Required ServiceAccount ServiceAccountReference `json:"serviceAccount"` - // source is a required field which selects the installation source of content - // for this ClusterExtension. Selection is performed by setting the sourceType. + // source is required and selects the installation source of content for this ClusterExtension. + // Set the sourceType field to perform the selection. // - // Catalog is currently the only implemented sourceType, and setting the - // sourcetype to "Catalog" requires the catalog field to also be defined. + // Catalog is currently the only implemented sourceType. + // Setting sourceType to "Catalog" requires the catalog field to also be defined. // // Below is a minimal example of a source definition (in yaml): // @@ -92,15 +91,15 @@ type ClusterExtensionSpec struct { // +kubebuilder:validation:Required Source SourceConfig `json:"source"` - // install is an optional field used to configure the installation options - // for the ClusterExtension such as the pre-flight check configuration. + // install is optional and configures installation options for the ClusterExtension, + // such as the pre-flight check configuration. 
// // +optional Install *ClusterExtensionInstallConfig `json:"install,omitempty"` - // config is an optional field used to specify bundle specific configuration - // used to configure the bundle. Configuration is bundle specific and a bundle may provide - // a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + // config is optional and specifies bundle-specific configuration. + // Configuration is bundle-specific and a bundle may provide a configuration schema. + // When not specified, the default configuration of the resolved bundle is used. // // config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide // a configuration schema the bundle is deemed to not be configurable. More information on how @@ -118,13 +117,12 @@ const SourceTypeCatalog = "Catalog" // +union // +kubebuilder:validation:XValidation:rule="has(self.sourceType) && self.sourceType == 'Catalog' ? has(self.catalog) : !has(self.catalog)",message="catalog is required when sourceType is Catalog, and forbidden otherwise" type SourceConfig struct { - // sourceType is a required reference to the type of install source. + // sourceType is required and specifies the type of install source. // - // Allowed values are "Catalog" + // The only allowed value is "Catalog". // - // When this field is set to "Catalog", information for determining the - // appropriate bundle of content to install will be fetched from - // ClusterCatalog resources existing on the cluster. + // When set to "Catalog", information for determining the appropriate bundle of content to install + // is fetched from ClusterCatalog resources on the cluster. // When using the Catalog sourceType, the catalog field must also be set. // // +unionDiscriminator @@ -132,8 +130,8 @@ type SourceConfig struct { // +kubebuilder:validation:Required SourceType string `json:"sourceType"` - // catalog is used to configure how information is sourced from a catalog. - // This field is required when sourceType is "Catalog", and forbidden otherwise. + // catalog configures how information is sourced from a catalog. + // It is required when sourceType is "Catalog", and forbidden otherwise. // // +optional Catalog *CatalogFilter `json:"catalog,omitempty"` @@ -145,11 +143,11 @@ type SourceConfig struct { // +kubebuilder:validation:XValidation:rule="has(self.preflight)",message="at least one of [preflight] are required when install is specified" // +union type ClusterExtensionInstallConfig struct { - // preflight is an optional field that can be used to configure the checks that are - // run before installation or upgrade of the content for the package specified in the packageName field. + // preflight is optional and configures the checks that run before installation or upgrade + // of the content for the package specified in the packageName field. // // When specified, it replaces the default preflight configuration for install/upgrade actions. - // When not specified, the default configuration will be used. + // When not specified, the default configuration is used. 
// // +optional Preflight *PreflightConfig `json:"preflight,omitempty"` @@ -161,22 +159,20 @@ type ClusterExtensionInstallConfig struct { // +kubebuilder:validation:XValidation:rule="has(self.configType) && self.configType == 'Inline' ?has(self.inline) : !has(self.inline)",message="inline is required when configType is Inline, and forbidden otherwise" // +union type ClusterExtensionConfig struct { - // configType is a required reference to the type of configuration source. + // configType is required and specifies the type of configuration source. // - // Allowed values are "Inline" + // The only allowed value is "Inline". // - // When this field is set to "Inline", the cluster extension configuration is defined inline within the - // ClusterExtension resource. + // When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. // // +unionDiscriminator // +kubebuilder:validation:Enum:="Inline" // +kubebuilder:validation:Required ConfigType ClusterExtensionConfigType `json:"configType"` - // inline contains JSON or YAML values specified directly in the - // ClusterExtension. + // inline contains JSON or YAML values specified directly in the ClusterExtension. // - // inline is used to specify arbitrary configuration values for the ClusterExtension. + // It is used to specify arbitrary configuration values for the ClusterExtension. // It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. // The configuration values are validated at runtime against a JSON schema provided by the bundle. // @@ -189,13 +185,12 @@ type ClusterExtensionConfig struct { // CatalogFilter defines the attributes used to identify and filter content from a catalog. type CatalogFilter struct { - // packageName is a reference to the name of the package to be installed - // and is used to filter the content from catalogs. + // packageName specifies the name of the package to be installed and is used to filter + // the content from catalogs. // - // packageName is required, immutable, and follows the DNS subdomain standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - // hyphens (-) or periods (.), start and end with an alphanumeric character, - // and be no longer than 253 characters. + // It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + // start and end with an alphanumeric character, and be no longer than 253 characters. // // Some examples of valid values are: // - some-package @@ -218,12 +213,13 @@ type CatalogFilter struct { // +kubebuilder:validation:Required PackageName string `json:"packageName"` - // version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + // version is an optional semver constraint (a specific version or range of versions). + // When unspecified, the latest version available is installed. // // Acceptable version ranges are no longer than 64 characters. - // Version ranges are composed of comma- or space-delimited values and one or - // more comparison operators, known as comparison strings. Additional - // comparison strings can be added using the OR operator (||). + // Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + // known as comparison strings. 
+ // You can add additional comparison strings using the OR operator (||). // // # Range Comparisons // @@ -297,25 +293,24 @@ type CatalogFilter struct { // +optional Version string `json:"version,omitempty"` - // channels is an optional reference to a set of channels belonging to - // the package specified in the packageName field. + // channels is optional and specifies a set of channels belonging to the package + // specified in the packageName field. // - // A "channel" is a package-author-defined stream of updates for an extension. + // A channel is a package-author-defined stream of updates for an extension. // - // Each channel in the list must follow the DNS subdomain standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - // hyphens (-) or periods (.), start and end with an alphanumeric character, - // and be no longer than 253 characters. No more than 256 channels can be specified. + // Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + // start and end with an alphanumeric character, and be no longer than 253 characters. + // You can specify no more than 256 channels. // - // When specified, it is used to constrain the set of installable bundles and - // the automated upgrade path. This constraint is an AND operation with the - // version field. For example: + // When specified, it constrains the set of installable bundles and the automated upgrade path. + // This constraint is an AND operation with the version field. For example: // - Given channel is set to "foo" // - Given version is set to ">=1.0.0, <1.5.0" - // - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - // - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + // - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + // - Automatic upgrades are constrained to upgrade edges defined by the selected channel // - // When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + // When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. // // Some examples of valid values are: // - 1.1.x @@ -342,33 +337,28 @@ type CatalogFilter struct { // +optional Channels []string `json:"channels,omitempty"` - // selector is an optional field that can be used - // to filter the set of ClusterCatalogs used in the bundle - // selection process. + // selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. // - // When unspecified, all ClusterCatalogs will be used in - // the bundle selection process. + // When unspecified, all ClusterCatalogs are used in the bundle selection process. // // +optional Selector *metav1.LabelSelector `json:"selector,omitempty"` - // upgradeConstraintPolicy is an optional field that controls whether - // the upgrade path(s) defined in the catalog are enforced for the package - // referenced in the packageName field. + // upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + // are enforced for the package referenced in the packageName field. // - // Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + // Allowed values are "CatalogProvided", "SelfCertified", or omitted. 
// - // When this field is set to "CatalogProvided", automatic upgrades will only occur - // when upgrade constraints specified by the package author are met. + // When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + // author are met. // - // When this field is set to "SelfCertified", the upgrade constraints specified by - // the package author are ignored. This allows for upgrades and downgrades to - // any version of the package. This is considered a dangerous operation as it - // can lead to unknown and potentially disastrous outcomes, such as data - // loss. It is assumed that users have independently verified changes when - // using this option. + // When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + // This allows upgrades and downgrades to any version of the package. + // This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + // such as data loss. + // Use this option only if you have independently verified the changes. // - // When this field is omitted, the default value is "CatalogProvided". + // When omitted, the default value is "CatalogProvided". // // +kubebuilder:validation:Enum:=CatalogProvided;SelfCertified // +kubebuilder:default:=CatalogProvided @@ -378,16 +368,14 @@ type CatalogFilter struct { // ServiceAccountReference identifies the serviceAccount used fo install a ClusterExtension. type ServiceAccountReference struct { - // name is a required, immutable reference to the name of the ServiceAccount - // to be used for installation and management of the content for the package - // specified in the packageName field. + // name is a required, immutable reference to the name of the ServiceAccount used for installation + // and management of the content for the package specified in the packageName field. // // This ServiceAccount must exist in the installNamespace. // - // name follows the DNS subdomain standard as defined in [RFC 1123]. - // It must contain only lowercase alphanumeric characters, - // hyphens (-) or periods (.), start and end with an alphanumeric character, - // and be no longer than 253 characters. + // The name field follows the DNS subdomain standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + // start and end with an alphanumeric character, and be no longer than 253 characters. // // Some examples of valid values are: // - some-serviceaccount @@ -413,26 +401,24 @@ type ServiceAccountReference struct { // // +kubebuilder:validation:XValidation:rule="has(self.crdUpgradeSafety)",message="at least one of [crdUpgradeSafety] are required when preflight is specified" type PreflightConfig struct { - // crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - // checks that run prior to upgrades of installed content. + // crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + // before upgrades of installed content. // - // The CRD Upgrade Safety pre-flight check safeguards from unintended - // consequences of upgrading a CRD, such as data loss. + // The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + // such as data loss. CRDUpgradeSafety *CRDUpgradeSafetyPreflightConfig `json:"crdUpgradeSafety"` } // CRDUpgradeSafetyPreflightConfig is the configuration for CRD upgrade safety preflight check. 
type CRDUpgradeSafetyPreflightConfig struct { - // enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + // enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. // // Allowed values are "None" or "Strict". The default value is "Strict". // - // When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - // when performing an upgrade operation. This should be used with caution as - // unintended consequences such as data loss can occur. + // When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + // Use this option with caution as unintended consequences such as data loss can occur. // - // When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - // performing an upgrade operation. + // When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. // // +kubebuilder:validation:Enum:="None";"Strict" // +kubebuilder:validation:Required @@ -455,17 +441,16 @@ const ( // BundleMetadata is a representation of the identifying attributes of a bundle. type BundleMetadata struct { - // name is required and follows the DNS subdomain standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - // hyphens (-) or periods (.), start and end with an alphanumeric character, - // and be no longer than 253 characters. + // name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + // start and end with an alphanumeric character, and be no longer than 253 characters. // // +kubebuilder:validation:Required // +kubebuilder:validation:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters" Name string `json:"name"` - // version is a required field and is a reference to the version that this bundle represents - // version follows the semantic versioning standard as defined in https://semver.org/. + // version is required and references the version that this bundle represents. + // It follows the semantic versioning standard as defined in https://semver.org/. // // +kubebuilder:validation:Required // +kubebuilder:validation:XValidation:rule="self.matches(\"^([0-9]+)(\\\\.[0-9]+)?(\\\\.[0-9]+)?(-([-0-9A-Za-z]+(\\\\.[-0-9A-Za-z]+)*))?(\\\\+([-0-9A-Za-z]+(-\\\\.[-0-9A-Za-z]+)*))?\")",message="version must be well-formed semver" @@ -491,9 +476,9 @@ type RevisionStatus struct { type ClusterExtensionStatus struct { // The set of condition types which apply to all spec.source variations are Installed and Progressing. // - // The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - // When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - // When Installed is False and the Reason is Failed, the bundle has failed to install. + // The Installed condition represents whether the bundle has been installed for this ClusterExtension: + // - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. 
+ // - When Installed is False and the Reason is Failed, the bundle has failed to install. // // The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. // When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -503,12 +488,12 @@ type ClusterExtensionStatus struct { // When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. // // - // When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - // These are indications from a package owner to guide users away from a particular package, channel, or bundle. - // BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - // ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - // PackageDeprecated is set if the requested package is marked deprecated in the catalog. - // Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + // When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + // These are indications from a package owner to guide users away from a particular package, channel, or bundle: + // - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + // - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + // - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + // - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. // // +listType=map // +listMapKey=type @@ -531,10 +516,10 @@ type ClusterExtensionStatus struct { // ClusterExtensionInstallStatus is a representation of the status of the identified bundle. type ClusterExtensionInstallStatus struct { - // bundle is a required field which represents the identifying attributes of a bundle. + // bundle is required and represents the identifying attributes of a bundle. // - // A "bundle" is a versioned set of content that represents the resources that - // need to be applied to a cluster to install a package. + // A "bundle" is a versioned set of content that represents the resources that need to be applied + // to a cluster to install a package. // // +kubebuilder:validation:Required Bundle BundleMetadata `json:"bundle"` diff --git a/commitchecker.yaml b/commitchecker.yaml index 883a4f9e0..8aa8939b4 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: 39718ba9a077b3e95ff7e69cc8e6bef5a8815541 +expectedMergeBase: 109ecdc13ba04da1f2ec0617f5b9a47a023add9c upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/docs/api-reference/olmv1-api-reference.md b/docs/api-reference/olmv1-api-reference.md index 4fc8cb3c6..6aeb4c8f4 100644 --- a/docs/api-reference/olmv1-api-reference.md +++ b/docs/api-reference/olmv1-api-reference.md @@ -46,8 +46,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | name is required and follows the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters. | | Required: \{\}
| -| `version` _string_ | version is a required field and is a reference to the version that this bundle represents
version follows the semantic versioning standard as defined in https://semver.org/. | | Required: \{\}
| +| `name` _string_ | name is required and follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters. | | Required: \{\}
| +| `version` _string_ | version is required and references the version that this bundle represents.
It follows the semantic versioning standard as defined in https://semver.org/. | | Required: \{\}
| #### CRDUpgradeSafetyEnforcement @@ -80,7 +80,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `enforcement` _[CRDUpgradeSafetyEnforcement](#crdupgradesafetyenforcement)_ | enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
when performing an upgrade operation. This should be used with caution as
unintended consequences such as data loss can occur.
When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
performing an upgrade operation. | | Enum: [None Strict]
Required: \{\}
| +| `enforcement` _[CRDUpgradeSafetyEnforcement](#crdupgradesafetyenforcement)_ | enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
Use this option with caution as unintended consequences such as data loss can occur.
When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. | | Enum: [None Strict]
Required: \{\}
| #### CatalogFilter @@ -96,11 +96,11 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `packageName` _string_ | packageName is a reference to the name of the package to be installed
and is used to filter the content from catalogs.
packageName is required, immutable, and follows the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters.
Some examples of valid values are:
- some-package
- 123-package
- 1-package-2
- somepackage
Some examples of invalid values are:
- -some-package
- some-package-
- thisisareallylongpackagenamethatisgreaterthanthemaximumlength
- some.package
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
| -| `version` _string_ | version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or
more comparison operators, known as comparison strings. Additional
comparison strings can be added using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, minor versions define the stability level. Some examples:<br />
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
| -| `channels` _string array_ | channels is an optional reference to a set of channels belonging to
the package specified in the packageName field.
A "channel" is a package-author-defined stream of updates for an extension.
Each channel in the list must follow the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters. No more than 256 channels can be specified.
When specified, it is used to constrain the set of installable bundles and
the automated upgrade path. This constraint is an AND operation with the
version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- Automatic upgrades will be constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256
items:MaxLength: 253
items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \}
| -| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is an optional field that can be used
to filter the set of ClusterCatalogs used in the bundle
selection process.
When unspecified, all ClusterCatalogs will be used in
the bundle selection process. | | | -| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is an optional field that controls whether
the upgrade path(s) defined in the catalog are enforced for the package
referenced in the packageName field.
Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
When this field is set to "CatalogProvided", automatic upgrades will only occur
when upgrade constraints specified by the package author are met.
When this field is set to "SelfCertified", the upgrade constraints specified by
the package author are ignored. This allows for upgrades and downgrades to
any version of the package. This is considered a dangerous operation as it
can lead to unknown and potentially disastrous outcomes, such as data
loss. It is assumed that users have independently verified changes when
using this option.
When this field is omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
| +| `packageName` _string_ | packageName specifies the name of the package to be installed and is used to filter
the content from catalogs.
It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
- 123-package
- 1-package-2
- somepackage
Some examples of invalid values are:
- -some-package
- some-package-
- thisisareallylongpackagenamethatisgreaterthanthemaximumlength
- some.package
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
| +| `version` _string_ | version is an optional semver constraint (a specific version or range of versions).
When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
known as comparison strings.
You can add additional comparison strings using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, minor versions define the stability level. Some examples:<br />
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
| +| `channels` _string array_ | channels is optional and specifies a set of channels belonging to the package
specified in the packageName field.
A channel is a package-author-defined stream of updates for an extension.
Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
You can specify no more than 256 channels.
When specified, it constrains the set of installable bundles and the automated upgrade path.
This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
- Automatic upgrades are constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256
items:MaxLength: 253
items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \}
| +| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
When unspecified, all ClusterCatalogs are used in the bundle selection process. | | | +| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
are enforced for the package referenced in the packageName field.
Allowed values are "CatalogProvided", "SelfCertified", or omitted.
When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
author are met.
When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
This allows upgrades and downgrades to any version of the package.
This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
such as data loss.
Use this option only if you have independently verified the changes.
When omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
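
Below is a hedged sketch of how the catalog filter fields above combine. The package name, channel, namespace, and ServiceAccount are hypothetical, not values taken from this repository:

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
  name: argocd # hypothetical
spec:
  namespace: argocd # hypothetical; must already exist
  serviceAccount:
    name: argocd-installer # hypothetical
  source:
    sourceType: Catalog
    catalog:
      packageName: argocd-operator # hypothetical package
      channels: [stable] # ANDed with the version constraint
      version: ">=1.0.0, <1.5.0"
      upgradeConstraintPolicy: CatalogProvided # the default
```

Only bundles that exist in channel "stable" AND satisfy the version range are considered installable.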
| #### CatalogSource @@ -117,15 +117,15 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from.
type is required.
The only allowed value is "Image".
When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image]
Required: \{\}
| -| `image` _[ImageSource](#imagesource)_ | image is used to configure how catalog contents are sourced from an OCI image.
This field is required when type is Image, and forbidden otherwise. | | | +| `type` _[SourceType](#sourcetype)_ | type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image]
Required: \{\}
| +| `image` _[ImageSource](#imagesource)_ | image configures how catalog contents are sourced from an OCI image.
It is required when type is Image, and forbidden otherwise. | | | #### ClusterCatalog -ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. +ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs @@ -140,8 +140,8 @@ _Appears in:_ | `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | | `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | -| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is the desired state of the ClusterCatalog.
spec is required.
The controller will work to ensure that the desired
catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\}
| -| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains information about the state of the ClusterCatalog such as:
- Whether or not the catalog contents are being served via the catalog content HTTP server
- Whether or not the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved | | | +| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is a required field that defines the desired state of the ClusterCatalog.
The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\}
| +| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains the following information about the state of the ClusterCatalog:
- Whether the catalog contents are being served via the catalog content HTTP server
- Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved | | | #### ClusterCatalogList @@ -177,9 +177,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `source` _[CatalogSource](#catalogsource)_ | source allows a user to define the source of a catalog.
A "catalog" contains information on content that can be installed on a cluster.
Providing a catalog source makes the contents of the catalog discoverable and usable by
other on-cluster components.
These on-cluster components may do a variety of things with this information, such as
presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
source:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest | | Required: \{\}
| -| `priority` _integer_ | priority allows the user to define a priority for a ClusterCatalog.
priority is optional.
A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
A higher number means higher priority.
It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
When omitted, the default priority is 0 because that is the zero value of integers.
Negative numbers can be used to specify a priority lower than the default.
Positive numbers can be used to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647. | 0 | | -| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
availabilityMode is optional.
Allowed values are "Available" and "Unavailable" and omitted.
When omitted, the default value is "Available".
When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
and its contents as usable.
When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. | Available | Enum: [Unavailable Available]
| +| `source` _[CatalogSource](#catalogsource)_ | source is a required field that defines the source of a catalog.
A catalog contains information on content that can be installed on a cluster.
The catalog source makes catalog contents discoverable and usable by other on-cluster components.
These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
source:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest | | Required: \{\}
| +| `priority` _integer_ | priority is an optional field that defines a priority for this ClusterCatalog.
Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
Higher numbers mean higher priority.
Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
Clients should prompt users for additional input to break the tie.
When omitted, the default priority is 0.
Use negative numbers to specify a priority lower than the default.
Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647. | 0 | | +| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
Clients should consider this ClusterCatalog and its contents as usable.
When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
Treat this the same as if the ClusterCatalog does not exist.
Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. | Available | Enum: [Unavailable Available]
| #### ClusterCatalogStatus @@ -195,10 +195,10 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions is a representation of the current state for this ClusterCatalog.
The current condition types are Serving and Progressing.
The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
When it has a status of True and a reason of Available, the contents of the catalog are being served.
When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
to the contents we identify that there are updates to the contents. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
- When status is True and reason is Available, the catalog contents are being served.
- When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
- When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
- When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
- When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
- The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
- The Progressing condition is True with reason Retrying because the system is working to serve the new version. | | | | `resolvedSource` _[ResolvedCatalogSource](#resolvedcatalogsource)_ | resolvedSource contains information about the resolved source based on the source type. | | | | `urls` _[ClusterCatalogURLs](#clustercatalogurls)_ | urls contains the URLs that can be used to access the catalog. | | | -| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the contents of the
catalog were extracted from their source format. As an example,
when using an Image source, the OCI image will be pulled and the
image layers written to a file-system backed cache. We refer to the
act of this extraction from the source format as "unpacking". | | | +| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the catalog contents were extracted from their source format.
For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
This extraction from the source format is called "unpacking". | | | #### ClusterCatalogURLs @@ -214,7 +214,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `base` _string_ | base is a cluster-internal URL that provides endpoints for
accessing the content of the catalog.
It is expected that clients append the path for the endpoint they wish
to access.
Currently, only a single endpoint is served and is accessible at the path
/api/v1.
The endpoints served for the v1 API are:
- /all - this endpoint returns the entirety of the catalog contents in the FBC format
As the needs of users and clients of the evolve, new endpoints may be added. | | MaxLength: 525
Required: \{\}
| +| `base` _string_ | base is a cluster-internal URL that provides endpoints for accessing the catalog content.
Clients should append the path for the endpoint they want to access.
Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- /all - this endpoint returns the entire catalog contents in the FBC format
New endpoints may be added as needs evolve. | | MaxLength: 525
Required: \{\}
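
As an illustration of the endpoint layout described above, a hedged status fragment; the hostname and path are hypothetical placeholders, not a guaranteed URL shape:

```yaml
status:
  urls:
    base: https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio # hypothetical
# A client appends the endpoint path, e.g. <base>/api/v1/all for the full FBC contents.
```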
| #### ClusterExtension @@ -253,8 +253,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is a required reference to the type of configuration source.
Allowed values are "Inline"
When this field is set to "Inline", the cluster extension configuration is defined inline within the
ClusterExtension resource. | | Enum: [Inline]
Required: \{\}
| -| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the
ClusterExtension.
inline is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1
Type: object
| +| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is required and specifies the type of configuration source.
The only allowed value is "Inline".
When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. | | Enum: [Inline]
Required: \{\}
| +| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the ClusterExtension.
It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1
Type: object
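
For the Inline configType above, a sketch of a config block; the keys under inline are purely hypothetical, since valid keys come from the resolved bundle's own schema:

```yaml
config:
  configType: Inline
  inline:
    replicas: 2 # hypothetical key
    logLevel: debug # hypothetical key
```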
| #### ClusterExtensionConfigType @@ -287,7 +287,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is an optional field that can be used to configure the checks that are
run before installation or upgrade of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
When not specified, the default configuration will be used. | | | +| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is optional and configures the checks that run before installation or upgrade
of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
When not specified, the default configuration is used. | | | #### ClusterExtensionInstallStatus @@ -303,7 +303,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is a required field which represents the identifying attributes of a bundle.
A "bundle" is a versioned set of content that represents the resources that
need to be applied to a cluster to install a package. | | Required: \{\}
| +| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is required and represents the identifying attributes of a bundle.
A "bundle" is a versioned set of content that represents the resources that need to be applied
to a cluster to install a package. | | Required: \{\}
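
For orientation, a sketch of how the installed bundle's identifying attributes might surface in status; the name and version are hypothetical:

```yaml
status:
  install:
    bundle:
      name: argocd-operator.v1.2.3 # hypothetical
      version: 1.2.3
```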
| #### ClusterExtensionList @@ -339,11 +339,11 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `namespace` _string_ | namespace is a reference to a Kubernetes namespace.
This is the namespace in which the provided ServiceAccount must exist.
It also designates the default namespace where namespace-scoped resources
for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
namespace is required, immutable, and follows the DNS label standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
start and end with an alphanumeric character, and be no longer than 63 characters
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63
Required: \{\}
| -| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount is a reference to a ServiceAccount used to perform all interactions
with the cluster that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
serviceAccount is required. | | Required: \{\}
| -| `source` _[SourceConfig](#sourceconfig)_ | source is a required field which selects the installation source of content
for this ClusterExtension. Selection is performed by setting the sourceType.
Catalog is currently the only implemented sourceType, and setting the
sourcetype to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | Required: \{\}
| -| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is an optional field used to configure the installation options
for the ClusterExtension such as the pre-flight check configuration. | | | -| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is an optional field used to specify bundle specific configuration
used to configure the bundle. Configuration is bundle specific and a bundle may provide
a configuration schema. When not specified, the default configuration of the resolved bundle will be used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
to configure bundles can be found in the OLM documentation associated with your current OLM version.
| | | +| `namespace` _string_ | namespace specifies a Kubernetes namespace.
This is the namespace where the provided ServiceAccount must exist.
It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63
Required: \{\}
| +| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
The serviceAccount field is required. | | Required: \{\}
| +| `source` _[SourceConfig](#sourceconfig)_ | source is required and selects the installation source of content for this ClusterExtension.
Set the sourceType field to perform the selection.
Catalog is currently the only implemented sourceType.
Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | Required: \{\}
| +| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is optional and configures installation options for the ClusterExtension,
such as the pre-flight check configuration. | | | +| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is optional and specifies bundle-specific configuration.
Configuration is bundle-specific and a bundle may provide a configuration schema.
When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema, the bundle is deemed not to be configurable. More information on how&#13;
to configure bundles can be found in the OLM documentation associated with your current OLM version.
| | | #### ClusterExtensionStatus @@ -359,7 +359,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.

When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.

When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
These are indications from a package owner to guide users away from a particular package, channel, or bundle.
BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
PackageDeprecated is set if the requested package is marked deprecated in the catalog.
Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether the bundle has been installed for this ClusterExtension:
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether the ClusterExtension is advancing towards a new state:&#13;
- When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.&#13;
- When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.&#13;
- When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.&#13;

When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.

When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
These are indications from a package owner to guide users away from a particular package, channel, or bundle:
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | | | `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | install is a representation of the current installation status for this ClusterExtension. | | | | `activeRevisions` _[RevisionStatus](#revisionstatus) array_ | activeRevisions holds a list of currently active (non-archived) ClusterExtensionRevisions,
including both installed and rolling out revisions.
| | | @@ -382,8 +382,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `ref` _string_ | ref allows users to define the reference to a container image containing Catalog contents.
ref is required.
ref can not be more than 1000 characters.
A reference can be broken down into 3 parts - the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
The port must be the last value in the domain.
Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
The name is typically the repository in the registry where an image is located.
It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
Multiple names can be concatenated with the "/" character.
The domain and name are combined using the "/" character.
Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
For a digest-based reference, the "@" character is the separator.
For a tag-based reference, the ":" character is the separator.
An identifier is required in the reference.
Digest-based references must contain an algorithm reference immediately after the "@" separator.
The algorithm reference must be followed by the ":" character and an encoded string.
The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters.
The tag must not be longer than 127 characters.
An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000
Required: \{\}
| -| `pollIntervalMinutes` _integer_ | pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
pollIntervalMinutes is optional.
pollIntervalMinutes can not be specified when ref is a digest-based reference.
When omitted, the image will not be polled for new content. | | Minimum: 1
| +| `ref` _string_ | ref is a required field that defines the reference to a container image containing catalog contents.
It cannot be more than 1000 characters.
A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
The port must be the last value in the domain.
Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
The name is typically the repository in the registry where an image is located.
It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
Multiple names can be concatenated with the "/" character.
The domain and name are combined using the "/" character.
Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
For a digest-based reference, the "@" character is the separator.
For a tag-based reference, the ":" character is the separator.
An identifier is required in the reference.
Digest-based references must contain an algorithm reference immediately after the "@" separator.
The algorithm reference must be followed by the ":" character and an encoded string.
The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters, ".", or "-" characters.&#13;
The tag must not be longer than 127 characters.
An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000
Required: \{\}
| +| `pollIntervalMinutes` _integer_ | pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
You cannot specify pollIntervalMinutes when ref is a digest-based reference.
When omitted, the image is not polled for new content. | | Minimum: 1
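
A short sketch contrasting the two reference styles; the image reuses the documented example, and the interval is an arbitrary choice:

```yaml
# Tag-based reference: polling is allowed.
source:
  type: Image
  image:
    ref: quay.io/operatorhubio/catalog:latest
    pollIntervalMinutes: 10 # arbitrary; forbidden for digest-based refs
```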
| #### PreflightConfig @@ -399,7 +399,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
checks that run prior to upgrades of installed content.
The CRD Upgrade Safety pre-flight check safeguards from unintended
consequences of upgrading a CRD, such as data loss. | | | +| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
before upgrades of installed content.
The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
such as data loss. | | | #### ResolvedCatalogSource @@ -416,8 +416,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from.
type is required.
The only allowed value is "Image".
When set to "Image", information about the resolved image source will be set in the 'image' field. | | Enum: [Image]
Required: \{\}
| -| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image is a field containing resolution information for a catalog sourced from an image.
This field must be set when type is Image, and forbidden otherwise. | | | +| `type` _[SourceType](#sourcetype)_ | type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
When set to "Image", information about the resolved image source is set in the image field. | | Enum: [Image]
Required: \{\}
| +| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image contains resolution information for a catalog sourced from an image.
It must be set when type is Image, and forbidden otherwise. | | | #### ResolvedImageSource @@ -433,7 +433,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `ref` _string_ | ref contains the resolved image digest-based reference.
The digest format is used so users can use other tooling to fetch the exact
OCI manifests that were used to extract the catalog contents. | | MaxLength: 1000
Required: \{\}
| +| `ref` _string_ | ref contains the resolved image digest-based reference.
The digest format allows you to use other tooling to fetch the exact OCI manifests
that were used to extract the catalog contents. | | MaxLength: 1000
Required: \{\}
| #### RevisionStatus @@ -466,7 +466,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount
to be used for installation and management of the content for the package
specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
name follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
- 123-serviceaccount
- 1-serviceaccount-2
- someserviceaccount
- some.serviceaccount
Some examples of invalid values are:
- -some-serviceaccount
- some-serviceaccount-
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
| +| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount used for installation
and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
The name field follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
- 123-serviceaccount
- 1-serviceaccount-2
- someserviceaccount
- some.serviceaccount
Some examples of invalid values are:
- -some-serviceaccount
- some-serviceaccount-
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
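
A hedged sketch of a ServiceAccount satisfying the naming rules above, created in the extension's install namespace; the names are hypothetical and the RBAC it needs depends on the bundle contents:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argocd-installer # hypothetical; must follow RFC 1123
  namespace: argocd # must match spec.namespace of the ClusterExtension
```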
| #### SourceConfig @@ -482,8 +482,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `sourceType` _string_ | sourceType is a required reference to the type of install source.
Allowed values are "Catalog"
When this field is set to "Catalog", information for determining the
appropriate bundle of content to install will be fetched from
ClusterCatalog resources existing on the cluster.
When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog]
Required: \{\}
| -| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog is used to configure how information is sourced from a catalog.
This field is required when sourceType is "Catalog", and forbidden otherwise. | | | +| `sourceType` _string_ | sourceType is required and specifies the type of install source.
The only allowed value is "Catalog".
When set to "Catalog", information for determining the appropriate bundle of content to install
is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog]
Required: \{\}
| +| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog configures how information is sourced from a catalog.
It is required when sourceType is "Catalog", and forbidden otherwise. | | | #### SourceType diff --git a/go.mod b/go.mod index 89aa72d7b..3e48c7703 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,15 @@ module github.com/operator-framework/operator-controller -go 1.24.6 +go 1.25.3 require ( github.com/BurntSushi/toml v1.5.0 github.com/Masterminds/semver/v3 v3.4.0 github.com/blang/semver/v4 v4.0.0 - github.com/cert-manager/cert-manager v1.18.2 + github.com/cert-manager/cert-manager v1.19.2 github.com/containerd/containerd v1.7.29 + github.com/cucumber/godog v0.15.1 + github.com/evanphx/json-patch v5.9.11+incompatible github.com/fsnotify/fsnotify v1.9.0 github.com/go-logr/logr v1.4.3 github.com/golang-jwt/jwt/v5 v5.3.0 @@ -15,34 +17,34 @@ require ( github.com/google/go-containerregistry v0.20.7 github.com/google/renameio/v2 v2.0.1 github.com/gorilla/handlers v1.5.2 - github.com/klauspost/compress v1.18.1 + github.com/klauspost/compress v1.18.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 - github.com/operator-framework/api v0.36.0 + github.com/operator-framework/api v0.37.0 github.com/operator-framework/helm-operator-plugins v0.8.0 github.com/operator-framework/operator-registry v1.61.0 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/common v0.67.4 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 - github.com/spf13/cobra v1.10.1 + github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 go.podman.io/image/v5 v5.38.0 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b - golang.org/x/mod v0.30.0 - golang.org/x/sync v0.18.0 - golang.org/x/tools v0.39.0 - helm.sh/helm/v3 v3.19.3 - k8s.io/api v0.34.1 - k8s.io/apiextensions-apiserver v0.34.1 - k8s.io/apimachinery v0.34.1 - k8s.io/apiserver v0.34.1 - k8s.io/cli-runtime v0.34.0 - k8s.io/client-go v0.34.1 - k8s.io/component-base v0.34.1 + golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 + golang.org/x/mod v0.31.0 + golang.org/x/sync v0.19.0 + golang.org/x/tools v0.40.0 + helm.sh/helm/v3 v3.19.4 + k8s.io/api v0.34.2 + k8s.io/apiextensions-apiserver v0.34.2 + k8s.io/apimachinery v0.34.2 + k8s.io/apiserver v0.34.2 + k8s.io/cli-runtime v0.34.2 + k8s.io/client-go v0.34.2 + k8s.io/component-base v0.34.2 k8s.io/klog/v2 v2.130.1 k8s.io/kubernetes v1.34.0 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d pkg.package-operator.run/boxcutter v0.7.1 sigs.k8s.io/controller-runtime v0.22.4 sigs.k8s.io/controller-tools v0.19.0 @@ -52,7 +54,7 @@ require ( require ( k8s.io/component-helpers v0.34.0 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect ) require ( @@ -71,7 +73,7 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v5 v5.0.2 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/cgroups/v3 v3.0.5 // indirect @@ -86,8 +88,10 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect + github.com/cucumber/gherkin/go/v26 v26.2.0 // indirect + github.com/cucumber/messages/go/v21 v21.0.1 // 
indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/cyphar/filepath-securejoin v0.6.0 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/cli v29.0.3+incompatible // indirect @@ -97,7 +101,6 @@ require ( github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect - github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.18.0 // indirect @@ -110,13 +113,13 @@ require ( github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.22.0 // indirect - github.com/go-openapi/jsonreference v0.21.1 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.2 // indirect github.com/go-openapi/swag v0.24.1 // indirect github.com/go-openapi/swag/cmdutils v0.24.0 // indirect github.com/go-openapi/swag/conv v0.24.0 // indirect github.com/go-openapi/swag/fileutils v0.24.0 // indirect - github.com/go-openapi/swag/jsonname v0.24.0 // indirect + github.com/go-openapi/swag/jsonname v0.25.1 // indirect github.com/go-openapi/swag/jsonutils v0.24.0 // indirect github.com/go-openapi/swag/loading v0.24.0 // indirect github.com/go-openapi/swag/mangling v0.24.0 // indirect @@ -126,6 +129,7 @@ require ( github.com/go-openapi/swag/yamlutils v0.24.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/uuid v4.3.1+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -138,11 +142,14 @@ require ( github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-memdb v1.3.4 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -215,8 +222,8 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // 
indirect go.opentelemetry.io/otel/metric v1.38.0 // indirect go.opentelemetry.io/otel/sdk v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.38.0 // indirect @@ -225,12 +232,12 @@ require ( go.podman.io/storage v1.61.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/net v0.47.0 // indirect + golang.org/x/crypto v0.46.0 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/time v0.13.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect @@ -244,11 +251,11 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/controller-manager v0.33.2 // indirect - k8s.io/kubectl v0.34.0 // indirect + k8s.io/kubectl v0.34.2 // indirect oras.land/oras-go/v2 v2.6.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect - sigs.k8s.io/gateway-api v1.1.0 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/gateway-api v1.4.0 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/kustomize/api v0.20.1 // indirect sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect diff --git a/go.sum b/go.sum index c9879d9aa..bd0840db8 100644 --- a/go.sum +++ b/go.sum @@ -44,11 +44,11 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cert-manager/cert-manager v1.18.2 h1:H2P75ycGcTMauV3gvpkDqLdS3RSXonWF2S49QGA1PZE= -github.com/cert-manager/cert-manager v1.18.2/go.mod h1:icDJx4kG9BCNpGjBvrmsFd99d+lXUvWdkkcrSSQdIiw= +github.com/cert-manager/cert-manager v1.19.2 h1:jSprN1h5pgNDSl7HClAmIzXuTxic/5FXJ32kbQHqjlM= +github.com/cert-manager/cert-manager v1.19.2/go.mod h1:e9NzLtOKxTw7y99qLyWGmPo6mrC1Nh0EKKcMkRfK+GE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= @@ -85,13 +85,21 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= github.com/coreos/go-systemd/v22 v22.6.0/go.mod 
h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cucumber/gherkin/go/v26 v26.2.0 h1:EgIjePLWiPeslwIWmNQ3XHcypPsWAHoMCz/YEBKP4GI= +github.com/cucumber/gherkin/go/v26 v26.2.0/go.mod h1:t2GAPnB8maCT4lkHL99BDCVNzCh1d7dBhCLt150Nr/0= +github.com/cucumber/godog v0.15.1 h1:rb/6oHDdvVZKS66hrhpjFQFHjthFSrQBCOI1LwshNTI= +github.com/cucumber/godog v0.15.1/go.mod h1:qju+SQDewOljHuq9NSM66s0xEhogx0q30flfxL4WUk8= +github.com/cucumber/messages/go/v21 v21.0.1 h1:wzA0LxwjlWQYZd32VTlAVDTkW6inOFmSM+RuOwHZiMI= +github.com/cucumber/messages/go/v21 v21.0.1/go.mod h1:zheH/2HS9JLVFukdrsPWoPdmUtmYQAQPLk7w5vWsk5s= +github.com/cucumber/messages/go/v22 v22.0.0/go.mod h1:aZipXTKc0JnjCsXrJnuZpWhtay93k7Rn3Dee7iyPJjs= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is= -github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -163,10 +171,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.22.0 h1:TmMhghgNef9YXxTu1tOopo+0BGEytxA+okbry0HjZsM= -github.com/go-openapi/jsonpointer v0.22.0/go.mod h1:xt3jV88UtExdIkkL7NloURjRQjbeUgcxFblMjq2iaiU= -github.com/go-openapi/jsonreference v0.21.1 h1:bSKrcl8819zKiOgxkbVNRUBIr6Wwj9KYrDbMjRs0cDA= -github.com/go-openapi/jsonreference v0.21.1/go.mod h1:PWs8rO4xxTUqKGu+lEvvCxD5k2X7QYkKAepJyCmSTT8= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU= +github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ= github.com/go-openapi/swag v0.24.1 h1:DPdYTZKo6AQCRqzwr/kGkxJzHhpKxZ9i/oX0zag+MF8= github.com/go-openapi/swag v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A= github.com/go-openapi/swag/cmdutils v0.24.0 h1:KlRCffHwXFI6E5MV9n8o8zBRElpY4uK4yWyAMWETo9I= @@ -175,8 +183,8 @@ github.com/go-openapi/swag/conv v0.24.0 h1:ejB9+7yogkWly6pnruRX45D1/6J+ZxRu92YFi github.com/go-openapi/swag/conv v0.24.0/go.mod 
h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c= github.com/go-openapi/swag/fileutils v0.24.0 h1:U9pCpqp4RUytnD689Ek/N1d2N/a//XCeqoH508H5oak= github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90= -github.com/go-openapi/swag/jsonname v0.24.0 h1:2wKS9bgRV/xB8c62Qg16w4AUiIrqqiniJFtZGi3dg5k= -github.com/go-openapi/swag/jsonname v0.24.0/go.mod h1:GXqrPzGJe611P7LG4QB9JKPtUZ7flE4DOVechNaDd7Q= +github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU= +github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo= github.com/go-openapi/swag/jsonutils v0.24.0 h1:F1vE1q4pg1xtO3HTyJYRmEuJ4jmIp2iZ30bzW5XgZts= github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0= github.com/go-openapi/swag/loading v0.24.0 h1:ln/fWTwJp2Zkj5DdaX4JPiddFC5CHQpvaBKycOlceYc= @@ -202,6 +210,9 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= @@ -265,8 +276,8 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -274,8 +285,19 @@ github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= 
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c= +github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -300,12 +322,15 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -332,8 +357,8 @@ github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= -github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= 
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -388,8 +413,8 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/api v0.36.0 h1:6+duRhamCvB540JbvNp/1+Pot7luff7HqdAOm9bAntg= -github.com/operator-framework/api v0.36.0/go.mod h1:QSmHMx8XpGsNWvjU5CUelVZC916VLp/TZhfYvGKpghM= +github.com/operator-framework/api v0.37.0 h1:2XCMWitBnumtJTqzip6LQKUwpM2pXVlt3gkpdlkbaCE= +github.com/operator-framework/api v0.37.0/go.mod h1:NZs4vB+Jiamyv3pdPDjZtuC4U7KX0eq4z2r5hKY5fUA= github.com/operator-framework/helm-operator-plugins v0.8.0 h1:0f6HOQC5likkf0b/OvGvw7nhDb6h8Cj5twdCNjwNzMc= github.com/operator-framework/helm-operator-plugins v0.8.0/go.mod h1:Sc+8bE38xTCgCChBUvtq/PxatEg9fAypr7S5iAw8nlA= github.com/operator-framework/operator-lib v0.17.0 h1:cbz51wZ9+GpWR1ZYP4CSKSSBxDlWxmmnseaHVZZjZt4= @@ -457,8 +482,11 @@ github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -478,6 +506,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= @@ -525,10 +554,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwd go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= @@ -577,11 +606,11 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4= +golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -592,8 +621,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod 
h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -610,8 +639,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= @@ -626,8 +655,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -646,8 +675,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -657,8 +686,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod 
h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -668,8 +697,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -684,10 +713,10 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= -golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -745,8 +774,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.19.3 h1:cTOsZ7XfjD9c05mPKTC1FjRT4h2cKzszfD5aSa72GM8= -helm.sh/helm/v3 v3.19.3/go.mod 
h1:vup/q0mmu4G+YD2xr9qF5GhhWdoj+wm2gXWojk5jnks= +helm.sh/helm/v3 v3.19.4 h1:E2yFBejmZBczWr5LblhjZbvAOAwVumfBO1AtN3nqI30= +helm.sh/helm/v3 v3.19.4/go.mod h1:PC1rk7PqacpkV4acUFMLStOOis7QM9Jq3DveHBInu4s= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= @@ -769,14 +798,14 @@ k8s.io/controller-manager v0.34.0 h1:oCHoqS8dcFp7zDSu7HUvTpakq3isSxil3GprGGlJMsE k8s.io/controller-manager v0.34.0/go.mod h1:XFto21U+Mm9BT8r/Jd5E4tHCGtwjKAUFOuDcqaj2VK0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w= k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= pkg.package-operator.run/boxcutter v0.7.1 h1:us5wn0px9aAkumrXiQx38+Sc9dTgKJsHFbePoRQeWRo= @@ -789,10 +818,10 @@ sigs.k8s.io/controller-tools v0.19.0 h1:OU7jrPPiZusryu6YK0jYSjPqg8Vhf8cAzluP9XGI sigs.k8s.io/controller-tools v0.19.0/go.mod h1:y5HY/iNDFkmFla2CfQoVb2AQXMsBk4ad84iR1PLANB0= sigs.k8s.io/crdify v0.5.0 h1:mrMH9CgXQPTZUpTU6Klqfnlys8bggv/7uvLT2lXSP7A= sigs.k8s.io/crdify v0.5.0/go.mod h1:ZIFxaYNgKYmFtZCLPysncXQ8oqwnNlHQbRUfxJHZwzU= -sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= -sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/gateway-api v1.4.0 h1:ZwlNM6zOHq0h3WUX2gfByPs2yAEsy/EenYJB78jpQfQ= +sigs.k8s.io/gateway-api v1.4.0/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= sigs.k8s.io/kustomize/kyaml v0.20.1 
h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
diff --git a/hack/api-lint-diff/run.sh b/hack/api-lint-diff/run.sh
new file mode 100755
index 000000000..ca067d618
--- /dev/null
+++ b/hack/api-lint-diff/run.sh
@@ -0,0 +1,434 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+# Colors for output
+RED='\033[0;31m'
+YELLOW='\033[1;33m'
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Temporary directory for this run
+TEMP_DIR=""
+WORKTREE_DIR=""
+BASELINE_BRANCH="${BASELINE_BRANCH:-main}"
+API_DIR="api"
+
+# Cleanup function
+cleanup() {
+    # Clean up git worktree first if it exists
+    if [[ -n "${WORKTREE_DIR}" && -d "${WORKTREE_DIR}" ]]; then
+        git worktree remove "${WORKTREE_DIR}" --force &> /dev/null || true
+    fi
+
+    # Clean up temporary directory
+    if [[ -n "${TEMP_DIR}" && -d "${TEMP_DIR}" ]]; then
+        rm -rf "${TEMP_DIR}"
+    fi
+}
+
+trap cleanup EXIT
+
+# Ensure we're in the repository root
+if [[ ! -d ".git" ]]; then
+    echo -e "${RED}Error: Must be run from repository root${NC}"
+    exit 1
+fi
+
+if [[ ! -d "${API_DIR}" ]]; then
+    echo -e "${RED}Error: ${API_DIR}/ directory not found${NC}"
+    exit 1
+fi
+
+# Create temporary directory
+TEMP_DIR=$(mktemp -d)
+echo -e "${BLUE}Using temporary directory: ${TEMP_DIR}${NC}" >&2
+
+# Get golangci-lint version from bingo
+get_golangci_version() {
+    # Extract version from .bingo/Variables.mk
+    # Format: GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.7.2
+    local version
+    version=$(grep 'GOLANGCI_LINT :=' .bingo/Variables.mk 2>/dev/null | sed -E 's/.*golangci-lint-(v[0-9.]+).*/\1/')
+
+    # Validate that we got a version
+    if [[ -z "${version}" ]]; then
+        echo -e "${YELLOW}Warning: Could not extract golangci-lint version from .bingo/Variables.mk${NC}" >&2
+        echo -e "${YELLOW}Using default version: latest${NC}" >&2
+        version="latest"
+    fi
+
+    echo "${version}"
+}
+
+# Create temporary golangci-lint config for kube-api-linter
+# This config focuses only on kube-api-linter for the api/ directory
+create_temp_config() {
+    cat > "${TEMP_DIR}/.golangci.yaml" <<'EOF'
+version: "2"
+output:
+  formats:
+    tab:
+      path: stdout
+      colors: false
+linters:
+  enable:
+    - kubeapilinter
+  settings:
+    custom:
+      kubeapilinter:
+        type: module
+        description: "Kube API Linter plugin"
+        original-url: "sigs.k8s.io/kube-api-linter"
+        settings:
+          linters: {}
+          lintersConfig:
+            optionalfields:
+              pointers:
+                preference: WhenRequired
+  exclusions:
+    rules:
+      # Ignore generated files
+      - path: zz_generated\..*\.go
+        linters:
+          - kubeapilinter
+run:
+  timeout: 5m
+
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
+EOF
+}
+
+# Get kube-api-linter version - pinned for supply chain security
+get_kube_api_linter_version() {
+    # Pin to specific pseudo-version to avoid supply chain risks
+    # kube-api-linter doesn't have semantic version tags, so we use a pseudo-version
+    # Update this version intentionally as part of dependency management
+    # To update: GOPROXY=https://proxy.golang.org go list -m -json sigs.k8s.io/kube-api-linter@latest
+    local version="v0.0.0-20251219161032-180d2bd496ef" # Latest as of 2025-12-19
+
+    echo "${version}"
+}
+
+# Create custom golangci-lint configuration
+create_custom_gcl_config() {
+    # Get golangci-lint version from bingo
+    local golangci_version
+    golangci_version=$(get_golangci_version)
+
+    # Validate version is not empty
+    if [[ -z "${golangci_version}" ]]; then
+        echo -e "${RED}Error: Failed to determine golangci-lint version from .bingo/Variables.mk${NC}" >&2
+        exit 1
+    fi
+
+    # Get kube-api-linter version (pinned for supply chain security)
+    local kube_api_linter_version
+    kube_api_linter_version=$(get_kube_api_linter_version)
+
+    # Create custom-gcl config
+    cat > "${TEMP_DIR}/.custom-gcl.yml" <<EOF
+version: ${golangci_version}
+name: custom-gcl
+destination: .
+plugins:
+  - module: sigs.k8s.io/kube-api-linter
+    version: ${kube_api_linter_version}
+EOF
+}
+
+# Build a custom golangci-lint binary that bundles the kube-api-linter plugin.
+# Prints the path of the built binary on stdout; returns non-zero on failure.
+build_custom_linter() {
+    local base_linter="$1"
+    # Must match the "name" field in .custom-gcl.yml; the binary is written to
+    # the "destination" directory, which is TEMP_DIR because the build runs there.
+    local custom_binary="${TEMP_DIR}/custom-gcl"
+
+    echo -e "${BLUE}Building custom golangci-lint with kube-api-linter...${NC}" >&2
+
+    # Create custom config
+    create_custom_gcl_config
+
+    # Build custom golangci-lint using the 'custom' command
+    # This requires the base golangci-lint binary and runs from TEMP_DIR
+    # where .custom-gcl.yml is located
+    echo -e "${BLUE}Running golangci-lint custom build...${NC}" >&2
+    local build_output
+    local abs_base_linter
+    # Convert to absolute path
+    if [[ "${base_linter}" != /* ]]; then
+        abs_base_linter="$(pwd)/${base_linter}"
+    else
+        abs_base_linter="${base_linter}"
+    fi
+
+    if ! build_output=$(cd "${TEMP_DIR}" && "${abs_base_linter}" custom -v 2>&1); then
+        echo -e "${YELLOW}Warning: Failed to build custom golangci-lint${NC}" >&2
+        echo -e "${YELLOW}Build output:${NC}" >&2
+        echo "${build_output}" >&2
+        echo -e "${YELLOW}Falling back to base linter (kube-api-linter will not be available)${NC}" >&2
+        return 1
+    fi
+    echo -e "${BLUE}Custom linter build completed${NC}" >&2
+
+    if [[ -f "${custom_binary}" ]]; then
+        echo -e "${GREEN}Successfully built custom golangci-lint at ${custom_binary}${NC}" >&2
+        # Only echo the binary path to stdout for capture
+        echo "${custom_binary}"
+        return 0
+    else
+        echo -e "${YELLOW}Warning: Custom binary not found at expected location${NC}" >&2
+        return 1
+    fi
+}
+
+# Function to check if golangci-lint has kube-api-linter
+check_linter_support() {
+    local linter_path="$1"
+    if ! "${linter_path}" linters 2>/dev/null | grep -q "kubeapilinter"; then
+        echo -e "${YELLOW}Warning: golangci-lint at ${linter_path} does not have kubeapilinter plugin${NC}"
+        echo -e "${YELLOW}Linting results may be incomplete. Consider using custom golangci-lint build.${NC}"
+        return 1
+    fi
+    return 0
+}
+
+# Find golangci-lint binary
+find_golangci_lint() {
+    # Check for custom build first
+    if [[ -f ".bingo/golangci-lint" ]]; then
+        echo ".bingo/golangci-lint"
+        return 0
+    fi
+
+    # Check for bin/golangci-lint
+    if [[ -f "bin/golangci-lint" ]]; then
+        echo "bin/golangci-lint"
+        return 0
+    fi
+
+    # Fall back to system golangci-lint
+    if command -v golangci-lint &> /dev/null; then
+        echo "golangci-lint"
+        return 0
+    fi
+
+    echo -e "${RED}Error: golangci-lint not found.${NC}" >&2
+    echo -e "${RED}Searched for:${NC}" >&2
+    echo -e "  - .bingo/golangci-lint" >&2
+    echo -e "  - bin/golangci-lint" >&2
+    echo -e "  - golangci-lint on your \$PATH" >&2
+    exit 1
+}
+
+# Run linter and capture output
+run_linter() {
+    local config_file="$1"
+    local output_file="$2"
+    local linter_path="$3"
+    local repo_root="${4:-$(pwd)}"
+
+    # Run golangci-lint on api/ directory only
+    # Use absolute paths to ensure consistency
+    (cd "${repo_root}" && "${linter_path}" run \
+        --config="${config_file}" \
+        --path-prefix="" \
+        ./api/...) > "${output_file}" 2>&1 || true
> "${output_file}" 2>&1 || true +} + +# Parse linter output into structured format +# Format: filename:line:column:linter:message +parse_linter_output() { + local output_file="$1" + local parsed_file="$2" + + # Expected format: path/api/v1/file.go:123:45 linter message + # We need to: extract api/ relative path, parse line:col, linter, and message + grep "/${API_DIR}/" "${output_file}" | \ + sed -E "s|^.*/("${API_DIR}"/[^:]+):([0-9]+):([0-9]+)[[:space:]]+([^[:space:]]+)[[:space:]]+(.+)$|\1:\2:\3:\4:\5|" \ + > "${parsed_file}" || true +} + +# Get list of files changed in api/ directory compared to baseline +get_changed_files() { + git diff "${BASELINE_BRANCH}...HEAD" --name-only -- "${API_DIR}/" | \ + grep '\.go$' | \ + grep -v 'zz_generated' || true +} + +# Categorize issues as NEW or PRE-EXISTING +categorize_issues() { + local current_file="$1" + local baseline_file="$2" + local changed_files_file="$3" + local new_issues_file="$4" + local preexisting_issues_file="$5" + + # Read changed files into array + local changed_files=() + if [[ -f "${changed_files_file}" ]]; then + while IFS= read -r file; do + changed_files+=("${file}") + done < "${changed_files_file}" + fi + + # Process current issues only if file exists and is not empty + if [[ -f "${current_file}" && -s "${current_file}" ]]; then + while IFS= read -r line; do + [[ -z "${line}" ]] && continue + + local file + file=$(echo "${line}" | cut -d: -f1) + + # If no files were changed, all issues are pre-existing + if [[ ${#changed_files[@]} -eq 0 ]]; then + echo "${line}" >> "${preexisting_issues_file}" + continue + fi + + # Check if file was changed + local file_changed=false + for changed_file in "${changed_files[@]}"; do + if [[ "${file}" == "${changed_file}" ]]; then + file_changed=true + break + fi + done + + # If file wasn't changed, it's pre-existing + if ! 
+                echo "${line}" >> "${preexisting_issues_file}"
+                continue
+            fi
+
+            # Check if issue exists in baseline
+            # Compare without line numbers since line numbers can change when code is added/removed
+            # Format is: file:line:col:linter:message
+            # We'll compare: file:linter:message
+            # Use f1,4,5- to capture field 5 and all remaining fields (handles colons in messages)
+            local file_linter_msg
+            file_linter_msg=$(echo "${line}" | cut -d: -f1,4,5-)
+
+            # Strip the line:col fields from the baseline the same way before
+            # comparing, so the normalized entries can actually match.
+            if cut -d: -f1,4,5- "${baseline_file}" 2>/dev/null | grep -Fxq "${file_linter_msg}"; then
+                echo "${line}" >> "${preexisting_issues_file}"
+            else
+                echo "${line}" >> "${new_issues_file}"
+            fi
+        done < "${current_file}"
+    fi
+}
+
+# Output issue (basic format)
+output_issue() {
+    echo "$1"
+}
+
+# Generate basic report
+generate_report() {
+    local new_issues_file="$1"
+    local preexisting_issues_file="$2"
+
+    local new_count=0
+    local preexisting_count=0
+
+    [[ -f "${new_issues_file}" ]] && new_count=$(wc -l < "${new_issues_file}" | tr -d ' ')
+    [[ -f "${preexisting_issues_file}" ]] && preexisting_count=$(wc -l < "${preexisting_issues_file}" | tr -d ' ')
+
+    # Simple summary
+    echo "API Lint Diff Results"
+    echo "Baseline: ${BASELINE_BRANCH}"
+    echo "NEW: ${new_count}"
+    echo "PRE-EXISTING: ${preexisting_count}"
+    echo ""
+
+    # Show NEW issues
+    if [[ ${new_count} -gt 0 ]]; then
+        echo "=== NEW ISSUES ==="
+        while IFS= read -r line; do
+            output_issue "${line}"
+        done < "${new_issues_file}"
+        echo ""
+    fi
+
+    # Show PRE-EXISTING issues
+    if [[ ${preexisting_count} -gt 0 ]]; then
+        echo "=== PRE-EXISTING ISSUES ==="
+        while IFS= read -r line; do
+            output_issue "${line}"
+        done < "${preexisting_issues_file}"
+        echo ""
+    fi
+
+    # Exit based on NEW issues count
+    if [[ ${new_count} -eq 0 ]]; then
+        echo -e "${GREEN}NO NEW ISSUES found. Lint check passed.${NC}"
+        if [[ ${preexisting_count} -gt 0 ]]; then
+            echo -e "${YELLOW}WARNING: Pre-existing issues detected. Please address them separately.${NC}"
+        fi
+        return 0
+    else
+        echo -e "${RED}FAILED: ${new_count} new issue(s)${NC}"
+        return 1
+    fi
+}
+
+# Main execution
+main() {
+    # Find golangci-lint
+    BASE_LINTER_PATH=$(find_golangci_lint)
+
+    # Build custom linter with kube-api-linter plugin
+    LINTER_PATH="${BASE_LINTER_PATH}"
+    if CUSTOM_LINTER=$(build_custom_linter "${BASE_LINTER_PATH}"); then
+        LINTER_PATH="${CUSTOM_LINTER}"
+    fi
+
+    # Convert to absolute path if needed
+    if [[ "${LINTER_PATH}" != /* ]]; then
+        LINTER_PATH="$(pwd)/${LINTER_PATH}"
+    fi
+
+    # Create temporary config
+    create_temp_config
+
+    # Get changed files
+    get_changed_files > "${TEMP_DIR}/changed_files.txt"
+
+    # Run linter on current branch
+    REPO_ROOT="$(pwd)"
+    run_linter "${TEMP_DIR}/.golangci.yaml" "${TEMP_DIR}/current_output.txt" "${LINTER_PATH}" "${REPO_ROOT}"
+    parse_linter_output "${TEMP_DIR}/current_output.txt" "${TEMP_DIR}/current_parsed.txt"
+
+    # Run linter on baseline
+    WORKTREE_DIR="${TEMP_DIR}/baseline_worktree"
+    if ! 
git worktree add --detach "${WORKTREE_DIR}" "${BASELINE_BRANCH}" 2>&1; then + echo -e "${RED}Error: Failed to create git worktree for baseline branch '${BASELINE_BRANCH}'${NC}" >&2 + echo -e "${RED}Please ensure the branch exists and try again.${NC}" >&2 + exit 1 + fi + run_linter "${TEMP_DIR}/.golangci.yaml" "${TEMP_DIR}/baseline_output.txt" "${LINTER_PATH}" "${WORKTREE_DIR}" + parse_linter_output "${TEMP_DIR}/baseline_output.txt" "${TEMP_DIR}/baseline_parsed.txt" + # Worktree cleanup is handled by the cleanup trap + + # Categorize issues + touch "${TEMP_DIR}/new_issues.txt" + touch "${TEMP_DIR}/preexisting_issues.txt" + + categorize_issues \ + "${TEMP_DIR}/current_parsed.txt" \ + "${TEMP_DIR}/baseline_parsed.txt" \ + "${TEMP_DIR}/changed_files.txt" \ + "${TEMP_DIR}/new_issues.txt" \ + "${TEMP_DIR}/preexisting_issues.txt" + + # Generate report + generate_report \ + "${TEMP_DIR}/new_issues.txt" \ + "${TEMP_DIR}/preexisting_issues.txt" + + return $? +} + +# Run main function +main "$@" diff --git a/hack/ci/custom-linters/analyzers/testdata/go.mod b/hack/ci/custom-linters/analyzers/testdata/go.mod index 23875e233..6a5571ff3 100644 --- a/hack/ci/custom-linters/analyzers/testdata/go.mod +++ b/hack/ci/custom-linters/analyzers/testdata/go.mod @@ -1,5 +1,5 @@ module testdata -go 1.24.3 +go 1.25.3 require github.com/go-logr/logr v1.4.3 diff --git a/hack/conftest/policy/README.md b/hack/conftest/policy/README.md new file mode 100644 index 000000000..21225dae3 --- /dev/null +++ b/hack/conftest/policy/README.md @@ -0,0 +1,70 @@ +# OPA Policies for NetworkPolicy Validation + +This directory contains [Open Policy Agent (OPA)](https://www.openpolicyagent.org/) Rego policies used by [conftest](https://www.conftest.dev/) to validate generated Kubernetes manifests. 
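+
+Conftest reports a violation for every `deny` rule that fires; when the `deny` set is empty, the
+check passes. For orientation, here is a minimal sketch of the rule shape used throughout this
+directory (illustrative only, not one of the shipped rules; it assumes conftest's `--combine`
+input, where `input` is a list of `{path, contents}` documents):
+
+```rego
+package main
+
+import rego.v1
+
+# Illustrative sketch: flag the rendered output when no document is a NetworkPolicy.
+has_network_policy if {
+	some doc in input
+	doc.contents.kind == "NetworkPolicy"
+}
+
+deny contains msg if {
+	not has_network_policy
+	msg := "No NetworkPolicy found in the rendered manifests."
+}
+```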
+ +## Policy Files + +### olm-networkpolicies.rego + +Package: `main` + +Validates core OLM NetworkPolicy requirements: + +- **Deny-all policy**: Ensures a default deny-all NetworkPolicy exists with empty podSelector and both Ingress/Egress policy types +- **catalogd-controller-manager policy**: Validates the NetworkPolicy for catalogd: + - Ingress on port 7443 (Prometheus metrics scraping) + - Ingress on port 8443 (catalog metadata queries from operator-controller) + - Ingress on port 9443 (Kubernetes API server webhook access) + - General egress enabled +- **operator-controller-controller-manager policy**: Validates the NetworkPolicy for operator-controller: + - Ingress on port 8443 (Prometheus metrics scraping) + - General egress enabled (for pulling bundle images, connecting to catalogd, and Kubernetes API) + +### prometheus-networkpolicies.rego + +Package: `prometheus` + +Validates Prometheus NetworkPolicy requirements: + +- Ensures a NetworkPolicy exists that allows both ingress and egress traffic for prometheus pods + +## Usage + +These policies are automatically run as part of: + +- `make lint-helm` - Validates both helm/olmv1 and helm/prometheus charts (runs `main` and `prometheus` packages) +- `make manifests` - Generates and validates core OLM manifests using only `main` package policies + (Prometheus policies are intentionally skipped here, even if manifests include Prometheus resources; + they are validated via `make lint-helm`) + +### Running manually + +```bash +# Run all policies (main + prometheus namespaces) +helm template olmv1 helm/olmv1 | conftest test --policy hack/conftest/policy/ --combine -n main -n prometheus - + +# Run only OLM policies +helm template olmv1 helm/olmv1 | conftest test --policy hack/conftest/policy/ --combine -n main - + +# Run only prometheus policies +helm template prometheus helm/prometheus | conftest test --policy hack/conftest/policy/ --combine -n prometheus - +``` + +### Excluding policies + +Use the `-n` (namespace) flag to selectively run policies: + +```bash +# Skip prometheus policies +conftest test --policy hack/conftest/policy/ --combine -n main + +# Skip OLM policies +conftest test --policy hack/conftest/policy/ --combine -n prometheus +``` + +## Adding New Policies + +1. Add new rules to an existing `.rego` file or create a new one +2. Use `package main` for policies that should run by default on all manifests +3. Use a custom package name (e.g., `package prometheus`) for optional policies +4. 
Update the Makefile targets if new namespaces need to be included diff --git a/hack/conftest/policy/olm-networkpolicies.rego b/hack/conftest/policy/olm-networkpolicies.rego new file mode 100644 index 000000000..df0c81426 --- /dev/null +++ b/hack/conftest/policy/olm-networkpolicies.rego @@ -0,0 +1,160 @@ +package main + +import rego.v1 + +# Check that a deny-all NetworkPolicy exists +# A deny-all policy has: +# - podSelector: {} (empty, applies to all pods) +# - policyTypes containing both "Ingress" and "Egress" +# - No ingress or egress rules defined + +is_deny_all(policy) if { + policy.kind == "NetworkPolicy" + policy.apiVersion == "networking.k8s.io/v1" + + # podSelector must be empty (applies to all pods) + count(policy.spec.podSelector) == 0 + + # Must have both Ingress and Egress policy types + policy_types := {t | some t in policy.spec.policyTypes} + policy_types["Ingress"] + policy_types["Egress"] + + # Must not have any ingress rules + not policy.spec.ingress + + # Must not have any egress rules + not policy.spec.egress +} + +has_deny_all_policy if { + some i in numbers.range(0, count(input) - 1) + is_deny_all(input[i].contents) +} + +deny contains msg if { + not has_deny_all_policy + msg := "No deny-all NetworkPolicy found. A NetworkPolicy with empty podSelector, policyTypes [Ingress, Egress], and no ingress/egress rules is required." +} + +# Check that a NetworkPolicy exists for catalogd-controller-manager that: +# - Allows ingress on TCP ports 7443, 8443, 9443 +# - Allows general egress traffic + +is_catalogd_policy(policy) if { + policy.kind == "NetworkPolicy" + policy.apiVersion == "networking.k8s.io/v1" + policy.spec.podSelector.matchLabels["control-plane"] == "catalogd-controller-manager" +} + +catalogd_policies contains policy if { + some i in numbers.range(0, count(input) - 1) + policy := input[i].contents + is_catalogd_policy(policy) +} + +catalogd_ingress_ports contains port if { + some policy in catalogd_policies + some rule in policy.spec.ingress + some port in rule.ports + port.protocol == "TCP" +} + +catalogd_ingress_port_numbers contains num if { + some port in catalogd_ingress_ports + num := port.port +} + +catalogd_has_egress if { + some policy in catalogd_policies + policy.spec.egress +} + +deny contains msg if { + count(catalogd_policies) == 0 + msg := "No NetworkPolicy found for catalogd-controller-manager. A NetworkPolicy allowing ingress on TCP ports 7443, 8443, 9443 and general egress is required." +} + +deny contains msg if { + count(catalogd_policies) > 1 + msg := sprintf("Expected exactly 1 NetworkPolicy for catalogd-controller-manager, found %d.", [count(catalogd_policies)]) +} + +deny contains msg if { + count(catalogd_policies) == 1 + not catalogd_ingress_port_numbers[7443] + msg := "Allow traffic to port 7443. Permit Prometheus to scrape metrics from catalogd, which is essential for monitoring its performance and health." +} + +deny contains msg if { + count(catalogd_policies) == 1 + not catalogd_ingress_port_numbers[8443] + msg := "Allow traffic to port 8443. Permit clients (eg. operator-controller) to query catalog metadata from catalogd, which is a core function for bundle resolution and operator discovery." +} + +deny contains msg if { + count(catalogd_policies) == 1 + not catalogd_ingress_port_numbers[9443] + msg := "Allow traffic to port 9443. Permit Kubernetes API server to reach catalogd's mutating admission webhook, ensuring integrity of catalog resources." 
+}
+
+deny contains msg if {
+	count(catalogd_policies) == 1
+	not catalogd_has_egress
+	msg := "Missing egress rules in catalogd-controller-manager NetworkPolicy. General egress is required to enable catalogd to pull catalog images from arbitrary image registries and to interact with the Kubernetes API server."
+}
+
+# Check that a NetworkPolicy exists for operator-controller-controller-manager that:
+# - Allows ingress on TCP port 8443
+# - Allows general egress traffic
+
+is_operator_controller_policy(policy) if {
+	policy.kind == "NetworkPolicy"
+	policy.apiVersion == "networking.k8s.io/v1"
+	policy.spec.podSelector.matchLabels["control-plane"] == "operator-controller-controller-manager"
+}
+
+operator_controller_policies contains policy if {
+	some i in numbers.range(0, count(input) - 1)
+	policy := input[i].contents
+	is_operator_controller_policy(policy)
+}
+
+operator_controller_ingress_ports contains port if {
+	some policy in operator_controller_policies
+	some rule in policy.spec.ingress
+	some port in rule.ports
+	port.protocol == "TCP"
+}
+
+operator_controller_ingress_port_numbers contains num if {
+	some port in operator_controller_ingress_ports
+	num := port.port
+}
+
+operator_controller_has_egress if {
+	some policy in operator_controller_policies
+	policy.spec.egress
+}
+
+deny contains msg if {
+	count(operator_controller_policies) == 0
+	msg := "No NetworkPolicy found for operator-controller-controller-manager. A NetworkPolicy allowing ingress on TCP port 8443 and general egress is required."
+}
+
+deny contains msg if {
+	count(operator_controller_policies) > 1
+	msg := sprintf("Expected exactly 1 NetworkPolicy for operator-controller-controller-manager, found %d.", [count(operator_controller_policies)])
+}
+
+deny contains msg if {
+	count(operator_controller_policies) == 1
+	not operator_controller_ingress_port_numbers[8443]
+	msg := "Allow traffic to port 8443. Permit Prometheus to scrape metrics from operator-controller, which is essential for monitoring its performance and health."
+}
+
+deny contains msg if {
+	count(operator_controller_policies) == 1
+	not operator_controller_has_egress
+	msg := "Missing egress rules in operator-controller-controller-manager NetworkPolicy. General egress is required to enable operator-controller to pull bundle images from arbitrary image registries, connect to catalogd's HTTPS server for metadata, and interact with the Kubernetes API server."
+} diff --git a/hack/conftest/policy/prometheus-networkpolicies.rego b/hack/conftest/policy/prometheus-networkpolicies.rego new file mode 100644 index 000000000..c37158250 --- /dev/null +++ b/hack/conftest/policy/prometheus-networkpolicies.rego @@ -0,0 +1,33 @@ +package prometheus + +import rego.v1 + +# Check that a NetworkPolicy exists that allows both ingress and egress traffic to prometheus pods +is_prometheus_policy(policy) if { + policy.kind == "NetworkPolicy" + policy.apiVersion == "networking.k8s.io/v1" + + # Must target prometheus pods + policy.spec.podSelector.matchLabels["app.kubernetes.io/name"] == "prometheus" + + # Must have both Ingress and Egress policy types + policy_types := {t | some t in policy.spec.policyTypes} + policy_types["Ingress"] + policy_types["Egress"] + + # Must have ingress rules defined (allowing traffic) + policy.spec.ingress + + # Must have egress rules defined (allowing traffic) + policy.spec.egress +} + +has_prometheus_policy if { + some i in numbers.range(0, count(input) - 1) + is_prometheus_policy(input[i].contents) +} + +deny contains msg if { + not has_prometheus_policy + msg := "No NetworkPolicy found that allows both ingress and egress traffic to prometheus pods. A NetworkPolicy targeting prometheus pods with ingress and egress rules is required." +} diff --git a/hack/kind-config/containerd/certs.d/go.mod b/hack/kind-config/containerd/certs.d/go.mod index adbe39415..fa9ef1076 100644 --- a/hack/kind-config/containerd/certs.d/go.mod +++ b/hack/kind-config/containerd/certs.d/go.mod @@ -1,6 +1,6 @@ module hack-cert.d -go 1.24.6 +go 1.25.3 // This file is present in the certs.d directory to ensure that // certs.d/host:port directories are not included in the main go diff --git a/hack/tools/test-profiling/go.mod b/hack/tools/test-profiling/go.mod index df225c427..11c55a0d8 100644 --- a/hack/tools/test-profiling/go.mod +++ b/hack/tools/test-profiling/go.mod @@ -1,6 +1,6 @@ module github.com/operator-framework/operator-controller/hack/tools/test-profiling -go 1.24.6 +go 1.25.3 require ( github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d diff --git a/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml b/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml index c78a57b92..7508ab775 100644 --- a/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml +++ b/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml @@ -29,7 +29,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -51,29 +51,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. 
+ availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -81,19 +76,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -101,15 +95,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. 
+ A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -120,25 +111,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -221,12 +210,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -244,31 +232,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. 
+ conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -329,11 +316,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. 
As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -342,14 +327,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -383,12 +368,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -407,19 +391,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: diff --git a/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml b/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml index 94f1d7121..0bbf9b988 100644 --- a/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml +++ b/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml @@ -29,7 +29,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. 
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -51,29 +51,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Clients should treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog resource without serving its contents. enum: - Unavailable - Available @@ -81,19 +76,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default.
+ Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -101,15 +95,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -120,25 +111,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -221,12 +210,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. 
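To make the pollIntervalMinutes and ref rules above concrete, here is a hedged spec fragment; all values are illustrative, and polling is only legal here because the ref is tag-based rather than digest-based:

spec:
  availabilityMode: Available
  priority: 100   # illustrative tie-breaker value
  source:
    type: Image
    image:
      ref: quay.io/operatorhubio/catalog:latest   # tag-based reference, so polling is allowed
      pollIntervalMinutes: 10   # check the tag for new content every 10 minutes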
When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -244,31 +232,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. 
This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -329,11 +316,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -342,14 +327,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -383,12 +368,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -407,19 +391,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. 
The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml index 0d1bbd71c..55687b567 100644 --- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml +++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml @@ -59,9 +59,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + A bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how @@ -69,21 +69,19 @@ spec: properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. minProperties: 1 @@ -99,37 +97,35 @@ spec: : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field.
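As a sketch of the config, configType, and inline fields described in this experimental CRD: the inline key below (replicas) is hypothetical, since valid keys depend entirely on the JSON schema provided by the resolved bundle:

spec:
  config:
    configType: Inline
    inline:
      replicas: 2   # hypothetical key; validated at runtime against the bundle's schema

Returning to the preflight field introduced above: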
When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -151,16 +147,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -172,24 +167,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. 
- serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -218,11 +211,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -233,30 +226,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. 
For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -286,13 +278,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -319,12 +310,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -372,35 +360,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. 
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -478,13 +465,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -592,9 +578,9 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -603,12 +589,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. 
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -673,17 +659,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -693,8 +678,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml index a0983e41f..e1316237c 100644 --- a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml +++ b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml @@ -59,37 +59,35 @@ spec: properties: install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. 
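A minimal sketch of the install and preflight options just described, assuming a user who has decided to skip the CRD Upgrade Safety check ("Strict" remains the safer default):

spec:
  install:
    preflight:
      crdUpgradeSafety:
        enforcement: None   # skips the CRD Upgrade Safety check; use with caution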
properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -111,16 +109,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. 
[RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -132,24 +129,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -178,11 +173,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -193,30 +188,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. 
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -246,13 +240,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -279,12 +272,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -332,35 +322,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. 
- When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -438,13 +427,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -470,21 +458,21 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. 
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -549,17 +537,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -569,8 +556,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. 
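Pulling the namespace, serviceAccount, and source fields of this CRD together, a minimal ClusterExtension might look like the sketch below. The apiVersion, package name, channel, and version range are assumptions for illustration only:

apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
  name: argocd   # hypothetical extension name
spec:
  namespace: argocd   # must already exist; also holds the ServiceAccount below
  serviceAccount:
    name: argocd-installer   # must have the permissions needed to manage the extension
  source:
    sourceType: Catalog
    catalog:
      packageName: argocd-operator   # assumed package name
      channels:
        - alpha   # assumed channel; ANDed with the version constraint
      version: ">=0.6.0, <0.7.0"   # assumed semver range
      upgradeConstraintPolicy: CatalogProvided   # the default policy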
type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml b/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml index d6fec9b5f..44c5bdea2 100644 --- a/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml +++ b/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml @@ -5,6 +5,10 @@ data: [[registry]] prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" + + [[registry]] + prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" + location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" kind: ConfigMap metadata: annotations: diff --git a/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml b/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml index fa4b11aca..ce1ff3c41 100644 --- a/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml +++ b/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml @@ -17,6 +17,7 @@ spec: image: busybox:1.36 name: tar securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: diff --git a/kind-config/kind-config-2node.yaml b/kind-config/kind-config-2node.yaml new file mode 100644 index 000000000..5532a9932 --- /dev/null +++ b/kind-config/kind-config-2node.yaml @@ -0,0 +1,45 @@ +apiVersion: kind.x-k8s.io/v1alpha4 +kind: Cluster +nodes: + - role: control-plane + extraPortMappings: + # e2e image registry service's NodePort + - containerPort: 30000 + hostPort: 30000 + listenAddress: "127.0.0.1" + protocol: tcp + # prometheus metrics service's NodePort + - containerPort: 30900 + hostPort: 30900 + listenAddress: "127.0.0.1" + protocol: tcp + kubeadmConfigPatches: + - | + kind: ClusterConfiguration + apiServer: + extraArgs: + enable-admission-plugins: OwnerReferencesPermissionEnforcement + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + taints: [] + extraMounts: + - hostPath: ./hack/kind-config/containerd/certs.d + containerPath: /etc/containerd/certs.d + - role: control-plane + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + taints: [] + extraMounts: + - hostPath: ./hack/kind-config/containerd/certs.d + containerPath: /etc/containerd/certs.d +containerdConfigPatches: + - |- + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" diff --git a/kind-config.yaml b/kind-config/kind-config.yaml similarity index 100% rename from kind-config.yaml rename to kind-config/kind-config.yaml diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml index 1b583d207..fbc5b4a53 100644 --- a/manifests/experimental-e2e.yaml +++ b/manifests/experimental-e2e.yaml @@ -152,6 +152,10 @@ data: [[registry]] prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" + + [[registry]] + prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" + location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" kind: ConfigMap metadata: annotations: @@ -211,7 +215,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog 
data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -233,29 +237,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Clients should treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog resource without serving its contents. enum: - Unavailable - Available @@ -263,19 +262,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0.
- Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -283,15 +281,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -302,25 +297,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -403,12 +396,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. 
+ When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -426,31 +418,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. 
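A status fragment consistent with the Serving and Progressing semantics just described (a sketch; timestamps are hypothetical, and the combined Available-plus-Retrying case is covered in the paragraph that follows):

status:
  conditions:
    - type: Serving
      status: "True"
      reason: Available                  # catalog contents are being served
      lastTransitionTime: "2025-01-01T00:00:00Z"   # hypothetical
    - type: Progressing
      status: "True"
      reason: Succeeded                  # progressed successfully; ready to continue
      lastTransitionTime: "2025-01-01T00:00:00Z"   # hypothetical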
- In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -511,11 +502,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -524,14 +513,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -565,12 +554,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -589,19 +577,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. 
+ Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -959,9 +944,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how @@ -969,21 +954,19 @@ spec: properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. minProperties: 1 @@ -999,37 +982,35 @@ spec: : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. 
+ crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -1051,16 +1032,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -1072,24 +1052,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. 
+ name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -1118,11 +1096,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -1133,30 +1111,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. 
For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -1186,13 +1163,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -1219,12 +1195,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -1272,35 +1245,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. 
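The channel/version AND constraint from the example above, expressed as a ClusterExtension spec (a sketch; the API group, names, and package are hypothetical, and the SelfCertified caution resumes below):

apiVersion: olm.operatorframework.io/v1  # assumed group/version
kind: ClusterExtension
metadata:
  name: foo                              # hypothetical
spec:
  namespace: foo-system                  # must already exist; also the default target namespace
  serviceAccount:
    name: foo-installer                  # must exist in spec.namespace
  source:
    sourceType: Catalog                  # the only implemented sourceType
    catalog:
      packageName: foo-operator          # hypothetical package
      channels:
        - foo                            # constrains installable bundles...
      version: ">=1.0.0, <1.5.0"         # ...AND-ed with this range
      upgradeConstraintPolicy: CatalogProvided  # default; SelfCertified bypasses catalog upgrade edges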
It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1378,13 +1350,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -1492,9 +1463,9 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -1503,12 +1474,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. 
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1573,17 +1544,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1593,8 +1563,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver @@ -2159,6 +2129,7 @@ spec: image: busybox:1.36 name: tar securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml index 7bff36748..22c7db269 100644 --- a/manifests/experimental.yaml +++ b/manifests/experimental.yaml @@ -176,7 +176,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. 
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -198,29 +198,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -228,19 +223,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. 
+ Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -248,15 +242,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -267,25 +258,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -368,12 +357,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. 
When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -391,31 +379,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. 
This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -476,11 +463,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -489,14 +474,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -530,12 +515,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -554,19 +538,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. 
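Given the base URL semantics above, a resolved status fragment might look like the following; clients append the endpoint path, and the /all endpoint is listed immediately after this sketch. The service hostname is an assumption, not a value from these manifests:

status:
  urls:
    base: https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio   # hypothetical cluster-internal base
# A client would then fetch <base>/api/v1/all to retrieve the full FBC stream.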
The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -924,9 +905,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how @@ -934,21 +915,19 @@ spec: properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. minProperties: 1 @@ -964,37 +943,35 @@ spec: : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. 
- The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -1016,16 +993,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -1037,24 +1013,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. 
This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -1083,11 +1057,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -1098,30 +1072,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. 
For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -1151,13 +1124,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -1184,12 +1156,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -1237,35 +1206,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. 
It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1343,13 +1311,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -1457,9 +1424,9 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -1468,12 +1435,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. 
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1538,17 +1505,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1558,8 +1524,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/manifests/standard-e2e.yaml b/manifests/standard-e2e.yaml index 1aed38ba9..9b8b95c9d 100644 --- a/manifests/standard-e2e.yaml +++ b/manifests/standard-e2e.yaml @@ -152,6 +152,10 @@ data: [[registry]] prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" + + [[registry]] + prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" + location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" kind: ConfigMap metadata: annotations: @@ -211,7 +215,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. 
+ ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -233,29 +237,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -263,19 +262,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. 
- Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -283,15 +281,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -302,25 +297,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -403,12 +396,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. 
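Below is a hedged sketch of a complete ClusterCatalog that puts the spec fields above together (in yaml). The catalog name and image reference are illustrative assumptions (the API group matches the CRDs defined in these manifests), and pollIntervalMinutes is only legal here because ref is tag-based rather than digest-based:

  apiVersion: olm.operatorframework.io/v1
  kind: ClusterCatalog
  metadata:
    name: operatorhubio
  spec:
    availabilityMode: Available
    priority: 0
    source:
      type: Image
      image:
        # Tag-based reference, so polling for new content is permitted.
        ref: quay.io/operatorhubio/catalog:latest
        pollIntervalMinutes: 10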
When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -426,31 +418,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. 
This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -511,11 +502,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -524,14 +513,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -565,12 +554,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -589,19 +577,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. 
The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -684,37 +669,35 @@ spec: properties: install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -736,16 +719,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. 
- namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -757,24 +739,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -803,11 +783,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -818,30 +798,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. 
+ channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -871,13 +850,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -904,12 +882,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. 
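As a concrete sketch, a selector that restricts bundle resolution to a single catalog could look like this (in yaml); the olm.operatorframework.io/metadata.name label key is an assumption based on the convention of labeling each ClusterCatalog with its own metadata name:

  selector:
    matchLabels:
      olm.operatorframework.io/metadata.name: operatorhubio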
properties: matchExpressions: description: matchExpressions is a list of label selector @@ -957,35 +932,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1063,13 +1037,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. 
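Below is a hedged, minimal example of a full ClusterExtension tying these fields together (in yaml); the resource name, namespace, ServiceAccount, package, channel, and version range are all illustrative assumptions, not values taken from these manifests:

  apiVersion: olm.operatorframework.io/v1
  kind: ClusterExtension
  metadata:
    name: argocd
  spec:
    namespace: argocd
    serviceAccount:
      name: argocd-installer
    source:
      sourceType: Catalog
      catalog:
        packageName: argocd-operator
        channels:
          - stable
        # Comma-delimited comparisons are ANDed, and they are also ANDed
        # with the channel constraint above.
        version: ">=0.6.0, <1.0.0"
        upgradeConstraintPolicy: CatalogProvided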
enum: - Catalog @@ -1095,21 +1068,21 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1174,17 +1147,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. 
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1194,8 +1166,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver @@ -1760,6 +1732,7 @@ spec: image: busybox:1.36 name: tar securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: diff --git a/manifests/standard.yaml b/manifests/standard.yaml index 34cc57918..b5166be98 100644 --- a/manifests/standard.yaml +++ b/manifests/standard.yaml @@ -176,7 +176,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -198,29 +198,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. 
+ Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -228,19 +223,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -248,15 +242,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -267,25 +258,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. 
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -368,12 +357,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -391,31 +379,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. 
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -476,11 +463,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -489,14 +474,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. 
+ The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -530,12 +515,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -554,19 +538,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -649,37 +630,35 @@ spec: properties: install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". 
- When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -701,16 +680,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -722,24 +700,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. 
Some examples of valid values are: - some-serviceaccount @@ -768,11 +744,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -783,30 +759,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -836,13 +811,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. 
+ packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -869,12 +843,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -922,35 +893,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. 
- Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1028,13 +998,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -1060,21 +1029,21 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. 
+ - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1139,17 +1108,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1159,8 +1127,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/openshift/catalogd/manifests-experimental.yaml b/openshift/catalogd/manifests-experimental.yaml index 4dacdee86..5ac59edcf 100644 --- a/openshift/catalogd/manifests-experimental.yaml +++ b/openshift/catalogd/manifests-experimental.yaml @@ -121,7 +121,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -143,29 +143,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". 
- When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Clients should treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog resource on the cluster but take its contents out of service. enum: - Unavailable - Available @@ -173,19 +168,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -193,15 +187,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components.
+ These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -212,25 +203,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -290,12 +279,11 @@ spec: rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes) : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -311,31 +299,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. 
- The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -395,11 +382,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. 
We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -407,14 +392,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -435,12 +420,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -456,19 +440,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: diff --git a/openshift/catalogd/manifests.yaml b/openshift/catalogd/manifests.yaml index 68b6c87f3..afefe4a28 100644 --- a/openshift/catalogd/manifests.yaml +++ b/openshift/catalogd/manifests.yaml @@ -121,7 +121,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -143,29 +143,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. 
+ spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Clients should treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog resource on the cluster but take its contents out of service. enum: - Unavailable - Available @@ -173,19 +168,18 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. @@ -193,15 +187,12 @@ spec: type: integer source: description: |- - source allows a user to define the source of a catalog.
- A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -212,25 +203,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -290,12 +279,11 @@ spec: rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes) : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. 
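# For reference, a minimal ClusterCatalog exercising the image source described above
# might look like the following sketch. The catalog name and image reference are
# hypothetical, and the olm.operatorframework.io/v1 apiVersion is assumed from these
# manifests. A tag-based ref is used so that pollIntervalMinutes is permitted
# (the schema forbids it for digest-based refs).
apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
  name: example-catalog
spec:
  availabilityMode: Available
  source:
    type: Image
    image:
      ref: quay.io/example/example-catalog:latest
      pollIntervalMinutes: 10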
enum: - Image @@ -311,31 +299,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. 
This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -395,11 +382,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -407,14 +392,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -435,12 +420,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -456,19 +440,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. 
The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: diff --git a/openshift/operator-controller/manifests-experimental.yaml b/openshift/operator-controller/manifests-experimental.yaml index 7ccb483b7..806d5be03 100644 --- a/openshift/operator-controller/manifests-experimental.yaml +++ b/openshift/operator-controller/manifests-experimental.yaml @@ -146,9 +146,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how @@ -156,21 +156,19 @@ spec: properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. minProperties: 1 @@ -184,37 +182,35 @@ spec: rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. 
- When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards against unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -234,16 +230,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -255,24 +250,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required.
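# A sketch of the wiring this field implies, with hypothetical names: the
# ServiceAccount must already exist in spec.namespace and must carry the RBAC
# permissions needed to manage the extension's content.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-installer
  namespace: example-ns
# Referenced from the ClusterExtension spec as:
#   namespace: example-ns
#   serviceAccount:
#     name: example-installer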
properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -298,11 +291,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -313,30 +306,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. 
For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -366,13 +358,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -396,12 +387,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -447,35 +435,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. 
It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -553,13 +540,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -662,9 +648,9 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -673,12 +659,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. 
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -741,25 +727,24 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/openshift/operator-controller/manifests.yaml b/openshift/operator-controller/manifests.yaml index 091dfe26a..29ebdf97a 100644 --- a/openshift/operator-controller/manifests.yaml +++ b/openshift/operator-controller/manifests.yaml @@ -146,37 +146,35 @@ spec: properties: install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. 
properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -196,16 +194,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. 
[RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -217,24 +214,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -260,11 +255,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -275,30 +270,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. 
+ Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -328,13 +322,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -358,12 +351,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -409,35 +399,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. 
+ Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -515,13 +504,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -544,21 +532,21 @@ spec: description: |- The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. 
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -621,25 +609,24 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. 
+ It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/requirements.txt b/requirements.txt index 35b0e3fc6..63b40fbe2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ Babel==2.17.0 -beautifulsoup4==4.14.2 +beautifulsoup4==4.14.3 certifi==2025.11.12 charset-normalizer==3.4.4 click==8.3.1 @@ -19,9 +19,9 @@ mkdocs-material-extensions==1.3.1 packaging==25.0 paginate==0.5.7 pathspec==0.12.1 -platformdirs==4.5.0 +platformdirs==4.5.1 Pygments==2.19.2 -pymdown-extensions==10.17.2 +pymdown-extensions==10.19 pyquery==2.0.1 python-dateutil==2.9.0.post0 PyYAML==6.0.3 @@ -31,5 +31,5 @@ regex==2025.11.3 requests==2.32.5 six==1.17.0 soupsieve==2.8 -urllib3==2.6.0 +urllib3==2.6.2 watchdog==6.0.0 diff --git a/test/e2e/README.md b/test/e2e/README.md new file mode 100644 index 000000000..c3483e518 --- /dev/null +++ b/test/e2e/README.md @@ -0,0 +1,350 @@ +# E2E Tests - Godog Framework + +This directory contains end-to-end (e2e) tests, written using the [Godog](https://github.com/cucumber/godog) framework. + +## Overview + +### What is Godog/BDD/Cucumber? + +Godog is a Behavior-Driven Development (BDD) framework that allows you to write tests in a human-readable format called +[Gherkin](https://cucumber.io/docs/gherkin/reference/). Tests are written as scenarios using Given-When-Then syntax, making them accessible to both technical and +non-technical stakeholders. + +**Benefits:** + +- **Readable**: Tests serve as living documentation +- **Maintainable**: Reusable step definitions reduce code duplication +- **Collaborative**: Product owners and developers share the same test specifications +- **Structured**: Clear separation between test scenarios and implementation + +## Project Structure + +``` +test/e2e/ +├── README.md # This file +├── features_test.go # Test runner and suite initialization +├── features/ # Gherkin feature files +│ ├── install.feature # ClusterExtension installation scenarios +│ ├── update.feature # ClusterExtension update scenarios +│ ├── recover.feature # Recovery scenarios +│ ├── status.feature # ClusterExtension status scenarios +│ └── metrics.feature # Metrics endpoint scenarios +└── steps/ # Step definitions and test utilities + ├── steps.go # Step definition implementations + ├── hooks.go # Test hooks and scenario context + └── testdata/ # Test data (RBAC templates, catalogs) + ├── rbac-template.yaml + ├── cluster-admin-rbac-template.yaml + ├── metrics-reader-rbac-template.yaml + ├── test-catalog-template.yaml + ├── extra-catalog-template.yaml + └── ... +``` + +## Architecture + +### 1. Test Runner (`features_test.go`) + +The main test entry point that configures and runs the Godog test suite. + +### 2. Feature Files (`features/*.feature`) + +Gherkin files that describe test scenarios in natural language. 
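Feature files are discovered and executed by the test runner described above. As a rough, illustrative sketch (not the verbatim contents of `features_test.go`; the suite name and the `steps.RegisterSteps` wiring are assumptions based on this layout), such a runner looks like:

```go
package e2e

import (
    "os"
    "testing"

    "github.com/cucumber/godog"
)

// TestFeatures is the single `go test` entry point: Godog discovers and
// runs every .feature file under features/.
func TestFeatures(t *testing.T) {
    suite := godog.TestSuite{
        Name: "operator-controller-e2e", // assumed suite name
        ScenarioInitializer: func(sc *godog.ScenarioContext) {
            // Step definitions and hooks are registered per scenario,
            // e.g. steps.RegisterSteps(sc) in this repository's layout.
        },
        Options: &godog.Options{
            Format: "pretty",
            Paths:  []string{"features"},
            Output: os.Stdout,
        },
    }
    if suite.Run() != 0 {
        t.Fatal("non-zero status returned, failed to run feature tests")
    }
}
```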
**Structure:**

```gherkin
Feature: [Feature Name]
  [Feature description]

  Background:
    [Common setup steps for all scenarios]

  Scenario: [Scenario Name]
    Given [precondition]
    When [action]
    Then [expected result]
    And [additional assertions]
```

**Example:**

```gherkin
Feature: Install ClusterExtension

  Background:
    Given OLM is available
    And "test" catalog serves bundles
    And Service account "olm-sa" with needed permissions is available in test namespace

  Scenario: Install latest available version from the default channel
    When ClusterExtension is applied
      """
      apiVersion: olm.operatorframework.io/v1
      kind: ClusterExtension
      metadata:
        name: ${NAME}
      spec:
        namespace: ${TEST_NAMESPACE}
        serviceAccount:
          name: olm-sa
        source:
          sourceType: Catalog
          catalog:
            packageName: test
            selector:
              matchLabels:
                "olm.operatorframework.io/metadata.name": test-catalog
            ...
      """
    Then ClusterExtension is rolled out
    And ClusterExtension is available
```

### 3. Step Definitions (`steps/steps.go`)

Go functions that implement the steps defined in feature files. Each step is registered with a regex pattern that
matches the Gherkin text.

**Registration:**

```go
func RegisterSteps(sc *godog.ScenarioContext) {
    sc.Step(`^OLM is available$`, OLMisAvailable)
    sc.Step(`^bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled)
    sc.Step(`^ClusterExtension is applied$`, ResourceIsApplied)
    // ... more steps
}
```

**Step Implementation Pattern:**

```go
func BundleInstalled(ctx context.Context, name, version string) error {
    sc := scenarioCtx(ctx)
    waitFor(ctx, func() bool {
        v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}")
        if err != nil {
            return false
        }
        var bundle map[string]interface{}
        if err := json.Unmarshal([]byte(v), &bundle); err != nil {
            return false
        }
        return bundle["name"] == name && bundle["version"] == version
    })
    return nil
}
```

### 4. Hooks and Context (`steps/hooks.go`)

Manages the test lifecycle and scenario-specific context.

**Hooks:**

- `CheckFeatureTags`: Skips scenarios based on feature gate tags (e.g., `@WebhookProviderCertManager`)
- `CreateScenarioContext`: Creates a unique namespace and unique names for each scenario
- `ScenarioCleanup`: Cleans up resources after each scenario

**Variable Substitution:**

Replaces `${TEST_NAMESPACE}`, `${NAME}`, and `${CATALOG_IMG}` with scenario-specific values.

## Writing Tests

### 1. Create a Feature File

Create a new `.feature` file in `test/e2e/features/`:

```gherkin
Feature: Your Feature Name
  Description of what this feature tests

  Background:
    Given OLM is available
    And "test" catalog serves bundles

  Scenario: Your scenario description
    When [some action]
    Then [expected outcome]
```

### 2. Implement Step Definitions

Add step implementations in `steps/steps.go`:

```go
func RegisterSteps(sc *godog.ScenarioContext) {
    // ... existing steps
    sc.Step(`^your step pattern "([^"]+)"$`, YourStepFunction)
}

func YourStepFunction(ctx context.Context, param string) error {
    sc := scenarioCtx(ctx)
    // Implementation goes here; sc carries the scenario-specific
    // namespace and resource names.
    _ = sc
    return nil
}
```

### 3. Use Existing Steps

Leverage existing steps for common operations:

- **Setup**: `Given OLM is available`, `And "test" catalog serves bundles`
- **Resource Management**: `When ClusterExtension is applied`, `And resource is applied`
- **Assertions**: `Then ClusterExtension is available`, `And bundle "..."
is installed` +- **Conditions**: `Then ClusterExtension reports Progressing as True with Reason Retrying:` + +### 4. Variable Substitution + +Use these variables in YAML templates: + +- `${NAME}`: Scenario-specific ClusterExtension name (e.g., `ce-123`) +- `${TEST_NAMESPACE}`: Scenario-specific namespace (e.g., `ns-123`) +- `${CATALOG_IMG}`: Catalog image reference (defaults to in-cluster registry, overridable via `CATALOG_IMG` env var) + +### 5. Feature Tags + +Use tags to conditionally run scenarios based on feature gates: + +```gherkin +@WebhookProviderCertManager +Scenario: Install operator having webhooks +``` + +Scenarios are skipped if the feature gate is not enabled on the deployed controller. + +## Running Tests + +### Run All Tests + +```bash +make test-e2e +``` + +or + +```bash +make test-experimental-e2e +``` + + +### Run Specific Feature + +```bash +go test test/e2e/features_test.go -- features/install.feature +``` + +### Run Specific Scenario by Tag + +```bash +go test test/e2e/features_test.go --godog.tags="@WebhookProviderCertManager" +``` + +### Run with Debug Logging + +```bash +go test -v test/e2e/features_test.go --log.debug +``` + +### CLI Options + +Godog options can be passed after `--`: + +```bash +go test test/e2e/features_test.go \ + --godog.format=pretty \ + --godog.tags="@WebhookProviderCertManager" +``` + +Available formats: `pretty`, `cucumber`, `progress`, `junit` + +**Custom Flags:** + +- `--log.debug`: Enable debug logging (development mode) +- `--k8s.cli=`: Specify path to Kubernetes CLI (default: `kubectl`) + - Useful for using `oc` or a specific kubectl binary + +**Example:** + +```bash +go test test/e2e/features_test.go --log.debug --k8s.cli=oc +``` + +### Environment Variables + +- `KUBECONFIG`: Path to kubeconfig file (defaults to `~/.kube/config`) +- `E2E_SUMMARY_OUTPUT`: Path to write test summary (optional) +- `CATALOG_IMG`: Override default catalog image reference (optional) +- `LOCAL_REGISTRY_HOST`: Local registry host for catalog images + +## Design Patterns + +### 1. Scenario Isolation + +Each scenario runs in its own namespace with unique resource names, ensuring complete isolation: + +- Namespace: `ns-{scenario-id}` +- ClusterExtension: `ce-{scenario-id}` + +### 2. Automatic Cleanup + +The `ScenarioCleanup` hook ensures all resources are deleted after each scenario: + +- Kills background processes (e.g., kubectl port-forward) +- Deletes ClusterExtensions +- Deletes namespaces +- Deletes added resources + +### 3. Declarative Resource Management + +Resources are managed declaratively using YAML templates embedded in feature files as docstrings: + +```gherkin +When ClusterExtension is applied +""" + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + ... + """ +``` + +### 4. Polling with Timeouts + +All asynchronous operations use `waitFor` with consistent timeout (300s) and tick (1s): + +```go +waitFor(ctx, func() bool { + // Check condition + return conditionMet +}) +``` + +### 5. Feature Gate Detection + +Tests automatically detect enabled feature gates from the running controller and skip scenarios that require disabled +features. + +## Common Step Patterns + +A list of available, implemented steps can be obtained by running: + +```shell +go test test/e2e/features_test.go -d +``` + +## Best Practices + +1. **Keep scenarios focused**: Each scenario should test one specific behavior +2. **Use Background wisely**: Common setup steps belong in Background +3. 
**Reuse steps**: Leverage existing step definitions before creating new ones +4. **Meaningful names**: Scenario names should clearly describe what is being tested +5. **Avoid implementation details**: Focus on behavior, not implementation + +## References + +- [Godog Documentation](https://github.com/cucumber/godog) +- [Gherkin Reference](https://cucumber.io/docs/gherkin/reference/) +- [Cucumber Best Practices](https://cucumber.io/docs/guides/10-minute-tutorial/) diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go deleted file mode 100644 index b1994d7e0..000000000 --- a/test/e2e/cluster_extension_install_test.go +++ /dev/null @@ -1,798 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "slices" - "testing" - "time" - - "github.com/google/go-containerregistry/pkg/crane" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" - . "github.com/operator-framework/operator-controller/test/helpers" -) - -const ( - artifactName = "operator-controller-e2e" - pollDuration = time.Minute - pollInterval = time.Second - testCatalogRefEnvVar = "CATALOG_IMG" - testCatalogName = "test-catalog" -) - -func TestClusterExtensionInstallRegistry(t *testing.T) { - type testCase struct { - name string - packageName string - } - for _, tc := range []testCase{ - { - name: "no registry configuration necessary", - packageName: "test", - }, - { - // NOTE: This test requires an extra configuration in /etc/containers/registries.conf, which is mounted - // for this e2e via the ./config/components/e2e/registries-conf kustomize component as part of the e2e component. - // The goal here is to prove that "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" is - // mapped to the "real" registry hostname ("docker-registry.operator-controller-e2e.svc.cluster.local:5000"). 
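- // For illustration, the assumed shape of that mounted registries.conf
- // (the dynamic registry test below updates a config with the same TOML layout):
- //   [[registry]]
- //   prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000"
- //   location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"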
- name: "package requires mirror registry configuration in /etc/containers/registries.conf", - packageName: "test-mirrored", - }, - } { - t.Run(tc.name, func(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: tc.packageName, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By eventually creating the NetworkPolicy named 'test-operator-network-policy'") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - var np networkingv1.NetworkPolicy - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: "test-operator-network-policy", Namespace: ns.Name}, &np)) - }, pollDuration, pollInterval) - - t.Log("By verifying that no templating occurs for registry+v1 bundle manifests") - cm := corev1.ConfigMap{} - require.NoError(t, c.Get(context.Background(), types.NamespacedName{Namespace: ns.Name, Name: "test-configmap"}, &cm)) - require.Contains(t, cm.Annotations, "shouldNotTemplate") - require.Contains(t, cm.Annotations["shouldNotTemplate"], "{{ $labels.namespace }}") - }) - } -} - -func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { - // NOTE: Like 'TestClusterExtensionInstallRegistry', this test also requires extra configuration in /etc/containers/registries.conf - packageName := "dynamic" - - t.Log("When a cluster extension is installed from a catalog") - 
t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: packageName, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It updates the registries.conf file contents") - cm := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "e2e-registries-conf", - Namespace: "olmv1-system", - }, - Data: map[string]string{ - "registries.conf": `[[registry]] -prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" -location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, - }, - } - require.NoError(t, c.Update(context.Background(), &cm)) - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, 2*time.Minute, pollInterval) - - // Give the check 2 minutes instead of the typical 1 for the pod's - // files to update from the configmap change. - // The theoretical max time is the kubelet sync period of 1 minute + - // ConfigMap cache TTL of 1 minute = 2 minutes - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, 2*time.Minute, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - extraCatalogName := fmt.Sprintf("extra-test-catalog-%s", rand.String(8)) - extraCatalog, err := CreateTestCatalog(context.Background(), extraCatalogName, os.Getenv(testCatalogRefEnvVar)) - require.NoError(t, err) - - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - defer func(cat 
*ocv1.ClusterCatalog) { - require.NoError(t, c.Delete(context.Background(), cat)) - require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) - return errors.IsNotFound(err) - }, pollDuration, pollInterval) - }(extraCatalog) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves to multiple bundle paths") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a failed resolution with multiple bundles") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True and Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - // Catalog names are sorted alphabetically in the error message - catalogs := []string{extensionCatalog.Name, extraCatalog.Name} - slices.Sort(catalogs) - expectedMessage := fmt.Sprintf("in multiple catalogs with the same priority %v", catalogs) - require.Contains(ct, cond.Message, expectedMessage) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - // No Selector since this is an exact version match - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful installation") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - require.Equal(ct, - &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{ - Name: "test-operator.1.0.0", - Version: "1.0.0", - }}, - clusterExtension.Status.Install, - ) - - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It does not allow to upgrade the ClusterExtension to a non-successor version") - t.Log("By updating the ClusterExtension resource to a 
non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting an unsatisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True and Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Equal(ct, "error upgrading from currently installed version \"1.0.0\": no bundles found for package \"test\" matching version \"1.2.0\"", cond.Message) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It allows to upgrade the ClusterExtension to a non-successor version") - t.Log("By updating the ClusterExtension resource to a non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. 
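- // Setting UpgradeConstraintPolicy to SelfCertified below tells the resolver to
- // ignore the catalog-provided upgrade edges, so this otherwise-blocked version
- // jump is permitted (see the upgradeConstraintPolicy CRD description above).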
- clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a satisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It does allow to upgrade the ClusterExtension to any of the successor versions within non-zero major version") - t.Log("By updating the ClusterExtension resource by skipping versions") - // 1.0.1 replaces 1.0.0 in the test catalog - clusterExtension.Spec.Source.Catalog.Version = "1.0.1" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when a catalog is patched with new ImageRef") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchExpressions: 
[]metav1.LabelSelectorRequirement{ - { - Key: "olm.operatorframework.io/metadata.name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{extensionCatalog.Name}, - }, - }, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - // patch imageRef tag on test-catalog image with v2 image - t.Log("By patching the catalog ImageRef to point to the v2 catalog") - updatedCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:v2", os.Getenv("CLUSTER_REGISTRY_HOST")) - err := patchTestCatalog(context.Background(), extensionCatalog.Name, updatedCatalogImage) - require.NoError(t, err) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.Contains(ct, clusterExtension.Status.Install.Bundle.Version, "1.3.0") - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when a new catalog is available") - - // Tag the image with the new tag - var err error - v1Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V1")) - err = crane.Tag(v1Image, latestImageTag, crane.Insecure) - require.NoError(t, err) - - // create a test-catalog with latest image tag - catalogName := fmt.Sprintf("test-catalog-%s", rand.String(8)) - latestCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:latest", os.Getenv("CLUSTER_REGISTRY_HOST")) - extensionCatalog, err := CreateTestCatalog(context.Background(), catalogName, latestCatalogImage) - require.NoError(t, err) - clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8)) - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterExtensionName, - }, - } - ns, err := CreateNamespace(context.Background(), clusterExtensionName) - require.NoError(t, err) - sa, err := 
CreateServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) - require.NoError(t, err) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - // update tag on test-catalog image with v2 image - t.Log("By updating the catalog tag to point to the v2 catalog") - v2Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V2")) - err = crane.Tag(v2Image, latestImageTag, crane.Insecure) - require.NoError(t, err) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when managed content is changed") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It installs the specified package with correct bundle path") - 
t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful installation") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - }, pollDuration, pollInterval) - - t.Log("By deleting a managed resource") - testConfigMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-configmap", - Namespace: clusterExtension.Spec.Namespace, - }, - } - require.NoError(t, c.Delete(context.Background(), testConfigMap)) - - t.Log("By eventually re-creating the managed resource") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionRecoversFromNoNamespaceWhenFailureFixed(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - t.Log("By not creating the Namespace and ServiceAccount") - clusterExtension, extensionCatalog := TestInitClusterExtensionClusterCatalog(t) - - defer TestCleanup(t, extensionCatalog, clusterExtension, nil, nil) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: clusterExtension.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: clusterExtension.Name, - }, - } - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting Progressing == True with Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Installed != True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.NotEqual(ct, metav1.ConditionTrue, cond.Status) - }, pollDuration, pollInterval) - - t.Log("By creating the Namespace and ServiceAccount") - sa, ns := TestInitServiceAccountNamespace(t, clusterExtension.Name) - defer TestCleanup(t, nil, nil, sa, ns) - - // NOTE: In order to ensure predictable results we need to 
ensure we have a single - // known failure with a singular fix operation. Additionally, due to the exponential - // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension - // after creating int the Namespace and ServiceAccount. - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True with Reason Success") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionRecoversFromExistingDeploymentWhenFailureFixed(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: clusterExtension.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: clusterExtension.Name, - }, - } - - t.Log("By creating a new Deployment that can not be adopted") - newDeployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-operator", - Namespace: clusterExtension.Name, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To(int32(1)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test-operator"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test-operator"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Command: []string{"sleep", "1000"}, - Image: "busybox", - ImagePullPolicy: corev1.PullAlways, - Name: "busybox", - SecurityContext: &corev1.SecurityContext{ - RunAsNonRoot: ptr.To(true), - RunAsUser: ptr.To(int64(1000)), - AllowPrivilegeEscalation: ptr.To(false), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - }, - }, - }, - }, - }, - }, - } - require.NoError(t, c.Create(context.Background(), newDeployment)) - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By 
eventually reporting Progressing == True with Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually failing to install the package successfully due to no adoption support") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - // TODO: We probably _should_ be testing the reason here, but helm and boxcutter applier have different reasons. - // Maybe we change helm to use "Absent" rather than "Failed" since the Progressing condition already captures - // the failure? - //require.Equal(ct, ocv1.ReasonFailed, cond.Reason) - require.Contains(ct, cond.Message, "No bundle installed") - }, pollDuration, pollInterval) - - t.Log("By deleting the new Deployment") - require.NoError(t, c.Delete(context.Background(), newDeployment)) - - // NOTE: In order to ensure predictable results we need to ensure we have a single - // known failure with a singular fix operation. Additionally, due to the exponential - // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension - // after deleting the Deployment. - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True with Reason Success") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} diff --git a/test/e2e/cluster_extension_revision_test.go b/test/e2e/cluster_extension_revision_test.go deleted file mode 100644 index 5c21e66ab..000000000 --- a/test/e2e/cluster_extension_revision_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "slices" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/remotecommand" - ctrl 
"sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/features" - . "github.com/operator-framework/operator-controller/internal/shared/util/test" - . "github.com/operator-framework/operator-controller/test/helpers" -) - -func TestClusterExtensionRevision(t *testing.T) { - SkipIfFeatureGateDisabled(t, string(features.BoxcutterRuntime)) - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.1", - // we would also like to force upgrade to 1.0.2, which is not within the upgrade path - UpgradeConstraintPolicy: ocv1.UpgradeConstraintPolicySelfCertified, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions") - var clusterExtensionRevision ocv1.ClusterExtensionRevision - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), 
types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - require.Len(ct, clusterExtension.Status.ActiveRevisions, 1) - require.Equal(ct, clusterExtension.Status.ActiveRevisions[0].Name, clusterExtensionRevision.Name) - require.Empty(ct, clusterExtension.Status.ActiveRevisions[0].Conditions) - }, pollDuration, pollInterval) - - t.Log("Check Deployment Availability Probe") - t.Log("By making the operator pod not ready") - podName := getPodName(t, clusterExtension.Spec.Namespace, client.MatchingLabels{"app": "olme2etest"}) - podExec(t, clusterExtension.Spec.Namespace, podName, []string{"rm", "/var/www/ready"}) - - t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:False:ProbeFailure conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbeFailure, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By propagating Available:False to ClusterExtension") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - }, pollDuration, pollInterval) - - t.Log("By making the operator pod ready") - podName = getPodName(t, clusterExtension.Spec.Namespace, client.MatchingLabels{"app": "olme2etest"}) - podExec(t, clusterExtension.Spec.Namespace, podName, []string{"touch", "/var/www/ready"}) - - t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By 
propagating Available:True to ClusterExtension") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - }, pollDuration, pollInterval) - - t.Log("Check archiving") - t.Log("By upgrading the cluster extension to v1.2.0") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By revision-2 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-2", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting progressing, available, and installed as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By revision-1 eventually reporting Progressing:False:Archived and Available:Unknown:Archived conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionUnknown, cond.Status) - require.Equal(ct, 
ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By upgrading the cluster extension to v1.0.2 containing bad image reference") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - clusterExtension.Spec.Source.Catalog.Version = "1.0.2" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By revision-3 eventually reporting Progressing:True:Succeeded and Available:False:ProbeFailure conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-3", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbeFailure, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting more than one active revision") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - require.Len(ct, clusterExtension.Status.ActiveRevisions, 2) - require.Equal(ct, clusterExtension.Status.ActiveRevisions[0].Name, fmt.Sprintf("%s-2", clusterExtension.Name)) - require.Equal(ct, clusterExtension.Status.ActiveRevisions[1].Name, fmt.Sprintf("%s-3", clusterExtension.Name)) - require.Empty(ct, clusterExtension.Status.ActiveRevisions[0].Conditions) - require.NotEmpty(ct, clusterExtension.Status.ActiveRevisions[1].Conditions) - }, pollDuration, pollInterval) -} - -func getPodName(t *testing.T, podNamespace string, matchingLabels client.MatchingLabels) string { - var podList corev1.PodList - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.List(context.Background(), &podList, client.InNamespace(podNamespace), matchingLabels)) - podList.Items = slices.DeleteFunc(podList.Items, func(pod corev1.Pod) bool { - // Ignore terminating pods - return pod.DeletionTimestamp != nil - }) - require.Len(ct, podList.Items, 1) - }, pollDuration, pollInterval) - return podList.Items[0].Name -} - -func podExec(t *testing.T, podNamespace string, podName string, cmd []string) { - req := cs.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(podNamespace).SubResource("exec") - req.VersionedParams(&corev1.PodExecOptions{ - Command: cmd, - Stdout: true, - }, scheme.ParameterCodec) - exec, err := remotecommand.NewSPDYExecutor(ctrl.GetConfigOrDie(), "POST", req.URL()) - require.NoError(t, err) - err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{Stdout: os.Stdout}) - require.NoError(t, err) -} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go deleted file mode 100644 index 847f5c753..000000000 --- a/test/e2e/e2e_suite_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - apiextensionsv1 
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" -) - -var ( - cfg *rest.Config - c client.Client - cs *kubernetes.Clientset -) - -const ( - testSummaryOutputEnvVar = "E2E_SUMMARY_OUTPUT" - latestImageTag = "latest" -) - -func TestMain(m *testing.M) { - cfg = ctrl.GetConfigOrDie() - - var err error - utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) - c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - utilruntime.Must(err) - - cs, err = kubernetes.NewForConfig(cfg) - utilruntime.Must(err) - - res := m.Run() - path := os.Getenv(testSummaryOutputEnvVar) - if path == "" { - fmt.Printf("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation") - } else { - err = testutil.PrintSummary(path) - if err != nil { - // Fail the run if alerts are found - fmt.Printf("%v", err) - os.Exit(1) - } - } - os.Exit(res) -} - -// patchTestCatalog will patch the existing clusterCatalog on the test cluster, provided -// the context, catalog name, and the image reference. It returns an error -// if any errors occurred while updating the catalog. -func patchTestCatalog(ctx context.Context, name string, newImageRef string) error { - // Fetch the existing ClusterCatalog - catalog := &ocv1.ClusterCatalog{} - err := c.Get(ctx, client.ObjectKey{Name: name}, catalog) - if err != nil { - return err - } - - // Update the ImageRef - catalog.Spec.Source.Image.Ref = newImageRef - - // Patch the ClusterCatalog - err = c.Update(ctx, catalog) - if err != nil { - return err - } - - return err -} diff --git a/test/e2e/features/install.feature b/test/e2e/features/install.feature new file mode 100644 index 000000000..ba59ffe7d --- /dev/null +++ b/test/e2e/features/install.feature @@ -0,0 +1,299 @@ +Feature: Install ClusterExtension + + As an OLM user I would like to install a cluster extension from catalog + or get an appropriate information in case of an error. 
+ + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + + Scenario: Install latest available version + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + And resource "networkpolicy/test-operator-network-policy" is installed + And resource "configmap/test-configmap" is installed + And resource "deployment/test-operator" is installed + + @mirrored-registry + Scenario Outline: Install latest available version from mirrored registry + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: <package-name> + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "<package-name>-operator.1.2.0" is installed in version "1.2.0" + And resource "networkpolicy/test-operator-network-policy" is installed + And resource "configmap/test-configmap" is installed + And resource "deployment/test-operator" is installed + + Examples: + | package-name | + | test-mirrored | + | dynamic | + + + Scenario: Report that bundle cannot be installed when it exists in multiple catalogs with same priority + Given ClusterCatalog "extra" serves bundles + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + """ + Then ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + found bundles for package "test" in multiple catalogs with the same priority [extra-catalog test-catalog] + """ + + @SingleOwnNamespaceInstallSupport + Scenario: watchNamespace config is required for extension supporting single namespace + Given ServiceAccount "olm-admin" in test namespace is cluster admin + And resource is applied + """ + apiVersion: v1 + kind: Namespace + metadata: + name: single-namespace-operator-target + """ + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + packageName: single-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error for resolved bundle "single-namespace-operator.1.0.0" with version "1.0.0": + invalid ClusterExtension configuration: invalid configuration: required field "watchNamespace" is missing + """ + When ClusterExtension is updated to set config.watchNamespace field + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + 
spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: single-namespace-operator-target # added + source: + sourceType: Catalog + catalog: + packageName: single-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension reports Installed as True + And bundle "single-namespace-operator.1.0.0" is installed in version "1.0.0" + And operator "single-namespace-operator" target namespace is "single-namespace-operator-target" + + @SingleOwnNamespaceInstallSupport + Scenario: watchNamespace config is required for extension supporting own namespace + Given ServiceAccount "olm-admin" in test namespace is cluster admin + And ClusterExtension is applied without the watchNamespace configuration + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error for resolved bundle "own-namespace-operator.1.0.0" with version + "1.0.0": invalid ClusterExtension configuration: invalid configuration: required + field "watchNamespace" is missing + """ + And ClusterExtension is updated to include the watchNamespace configuration + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: some-ns # added, but not own namespace + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error for resolved bundle "own-namespace-operator.1.0.0" with version + "1.0.0": invalid ClusterExtension configuration: invalid configuration: 'some-ns' + is not valid ownNamespaceInstallMode: invalid value "some-ns": watchNamespace + must be "${TEST_NAMESPACE}" (the namespace where the operator is installed) because this + operator only supports OwnNamespace install mode + """ + When ClusterExtension is updated to set watchNamespace to own namespace value + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: ${TEST_NAMESPACE} # own namespace + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And operator "own-namespace-operator" target namespace is "${TEST_NAMESPACE}" + + @WebhookProviderCertManager + Scenario: Install operator having webhooks + Given ServiceAccount "olm-admin" in test namespace is cluster admin + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + 
packageName: webhook-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And resource apply fails with error msg containing "Invalid value: false: Spec.Valid must be true" + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + valid: false # webhook rejects it as invalid value + """ + And resource is applied + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + valid: true + """ + And resource "webhooktest/${NAME}" matches + """ + apiVersion: webhook.operators.coreos.io/v2 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + conversion: + valid: true + mutate: true + """ + And resource "webhooktest.v1.webhook.operators.coreos.io/${NAME}" matches + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + valid: true + mutate: true + """ diff --git a/test/e2e/features/metrics.feature b/test/e2e/features/metrics.feature new file mode 100644 index 000000000..ccb719198 --- /dev/null +++ b/test/e2e/features/metrics.feature @@ -0,0 +1,15 @@ +Feature: Expose various metrics + + Background: + Given OLM is available + + Scenario Outline: component exposes metrics + Given ServiceAccount "metrics-reader" in test namespace has permissions to fetch "<component>" metrics + When ServiceAccount "metrics-reader" sends request to "/metrics" endpoint of "<component>" service + Then Prometheus metrics are returned in the response + + Examples: + | component | + | operator-controller | + | catalogd | + \ No newline at end of file diff --git a/test/e2e/features/recover.feature b/test/e2e/features/recover.feature new file mode 100644 index 000000000..0438f2d1a --- /dev/null +++ b/test/e2e/features/recover.feature @@ -0,0 +1,117 @@ +Feature: Recover cluster extension from errors that might occur during its lifetime + + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + + + Scenario: Restore removed resource + Given ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is available + And resource "configmap/test-configmap" exists + When resource "configmap/test-configmap" is removed + Then resource "configmap/test-configmap" is eventually restored + + Scenario: Install ClusterExtension after target namespace becomes available + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying + When ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + Then ClusterExtension is available + And ClusterExtension 
reports Progressing as True with Reason Succeeded + + Scenario: Install ClusterExtension after conflicting resource is removed + Given ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + And resource is applied + """ + apiVersion: apps/v1 + kind: Deployment + metadata: + name: test-operator + namespace: ${TEST_NAMESPACE} + spec: + replicas: 1 + selector: + matchLabels: + app: test-operator + template: + metadata: + labels: + app: test-operator + spec: + containers: + - command: + - "sleep" + args: + - "1000" + image: busybox:1.36 + imagePullPolicy: IfNotPresent + name: busybox + securityContext: + runAsNonRoot: true + runAsUser: 1000 + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + """ + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying + And ClusterExtension reports Installed as False + When resource "deployment/test-operator" is removed + Then ClusterExtension is available + And ClusterExtension reports Progressing as True with Reason Succeeded + And ClusterExtension reports Installed as True diff --git a/test/e2e/features/status.feature b/test/e2e/features/status.feature new file mode 100644 index 000000000..5c8a3141d --- /dev/null +++ b/test/e2e/features/status.feature @@ -0,0 +1,45 @@ +Feature: Report status of the managed ClusterExtension workload + + As an OLM user, I would like availability changes of the managed + workload to be reported on the ClusterExtension.
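The "reports <Type> as <Status> with Reason <Reason>" steps used throughout these features presumably poll status conditions the same way the deleted Go tests did. A sketch of that polling pattern, reusing the testify and apimachinery helpers that appear in the removed code (the helper name and poll constants here are illustrative, not the suite's actual API):

package steps

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	ocv1 "github.com/operator-framework/operator-controller/api/v1"
)

// requireCondition polls until the named ClusterExtension reports the given
// condition type with the expected status and reason, mirroring the
// require.EventuallyWithT blocks of the deleted tests.
func requireCondition(t *testing.T, c client.Client, name, condType string, status metav1.ConditionStatus, reason string) {
	require.EventuallyWithT(t, func(ct *assert.CollectT) {
		ext := &ocv1.ClusterExtension{}
		require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: name}, ext))
		cond := apimeta.FindStatusCondition(ext.Status.Conditions, condType)
		require.NotNil(ct, cond)
		require.Equal(ct, status, cond.Status)
		require.Equal(ct, reason, cond.Reason)
	}, 5*time.Minute, time.Second) // illustrative poll duration and interval
}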
+ + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + + @BoxcutterRuntime + Scenario: Report availability change when managed workload is not ready + When resource "deployment/test-operator" reports as not ready + Then ClusterExtension reports Available as False with Reason ProbeFailure + And ClusterExtensionRevision "${NAME}-1" reports Available as False with Reason ProbeFailure + + @BoxcutterRuntime + Scenario: Report availability change when managed workload restores its readiness + Given resource "deployment/test-operator" reports as not ready + And ClusterExtension reports Available as False with Reason ProbeFailure + And ClusterExtensionRevision "${NAME}-1" reports Available as False with Reason ProbeFailure + When resource "deployment/test-operator" reports as ready + Then ClusterExtension is available + And ClusterExtensionRevision "${NAME}-1" reports Available as True with Reason ProbesSucceeded \ No newline at end of file diff --git a/test/e2e/features/update.feature b/test/e2e/features/update.feature new file mode 100644 index 000000000..dee45e32a --- /dev/null +++ b/test/e2e/features/update.feature @@ -0,0 +1,244 @@ +Feature: Update ClusterExtension + + As an OLM user I would like to update a ClusterExtension from a catalog + or get appropriate information in case of an error.
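The @catalog-updates scenarios below ("... is also tagged as ...") depend on retagging the test catalog image in the local registry, which the deleted Go test did with go-containerregistry's crane. A sketch of that step, under the assumption that the same LOCAL_REGISTRY_HOST and E2E_TEST_CATALOG_V2 environment variables from the removed test still identify the image (the helper name is illustrative):

package steps

import (
	"fmt"
	"os"

	"github.com/google/go-containerregistry/pkg/crane"
)

// tagV2AsLatest points the mutable "latest" tag at the v2 catalog image, so a
// ClusterCatalog pinned to the same image ref serves new content on its next
// poll.
func tagV2AsLatest() error {
	v2Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V2"))
	// crane.Insecure permits talking to the plain-HTTP local test registry,
	// matching the crane.Tag call in the deleted test.
	return crane.Tag(v2Image, "latest", crane.Insecure)
}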
+ + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + + Scenario: Update to a successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is updated to version "1.0.1" + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.0.1" is installed in version "1.0.1" + + Scenario: Cannot update extension to non successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.2.0 + """ + Then ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error upgrading from currently installed version "1.0.0": no bundles found for package "test" matching version "1.2.0" + """ + + Scenario: Force update to non successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.2.0 + upgradeConstraintPolicy: SelfCertified + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + + @catalog-updates + Scenario: Auto update when new version becomes available in the new catalog image ref + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is rolled out + And 
ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + When ClusterCatalog "test" is updated to version "v2" + Then bundle "test-operator.1.3.0" is installed in version "1.3.0" + + Scenario: Auto update when new version becomes available in the same catalog image ref + Given "test" catalog image version "v1" is also tagged as "latest" + And ClusterCatalog "test" is updated to version "latest" + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + When ClusterCatalog "test" image version "v2" is also tagged as "latest" + Then bundle "test-operator.1.3.0" is installed in version "1.3.0" + + @BoxcutterRuntime + Scenario: Each update creates a new revision + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + upgradeConstraintPolicy: SelfCertified + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is updated to version "1.2.0" + Then bundle "test-operator.1.2.0" is installed in version "1.2.0" + And ClusterExtension is rolled out + And ClusterExtension is available + And ClusterExtension reports "${NAME}-2" as active revision + And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded + And ClusterExtensionRevision "${NAME}-2" reports Available as True with Reason ProbesSucceeded + And ClusterExtensionRevision "${NAME}-1" is archived + + @BoxcutterRuntime + Scenario: Report all active revisions on ClusterExtension + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + upgradeConstraintPolicy: SelfCertified + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is updated to version "1.0.2" + Then ClusterExtension reports "${NAME}-1, ${NAME}-2" as active revisions + And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded + And ClusterExtensionRevision "${NAME}-2" reports Available as False with Reason ProbeFailure + diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go new file mode 100644 index 000000000..706c822ef --- /dev/null +++ b/test/e2e/features_test.go @@ -0,0 +1,75 @@ +package e2e + +import ( + "fmt" + "log" + "os" + "testing" + + "github.com/cucumber/godog" + "github.com/cucumber/godog/colors" + "github.com/spf13/pflag" + + testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" + "github.com/operator-framework/operator-controller/test/e2e/steps" +) + +var opts = 
godog.Options{ + Format: "pretty", + Paths: []string{"features"}, + Output: colors.Colored(os.Stdout), + Concurrency: 1, + NoColors: true, +} + +func init() { + godog.BindCommandLineFlags("godog.", &opts) +} + +func TestMain(m *testing.M) { + // parse CLI arguments + pflag.Parse() + opts.Paths = pflag.Args() + + // run tests + sc := godog.TestSuite{ + TestSuiteInitializer: InitializeSuite, + ScenarioInitializer: InitializeScenario, + Options: &opts, + }.Run() + + if st := m.Run(); st > sc { + sc = st + } + switch sc { + // 0 - success + case 0: + + path := os.Getenv("E2E_SUMMARY_OUTPUT") + if path == "" { + fmt.Println("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation") + } else { + if err := testutil.PrintSummary(path); err != nil { + // Fail the run if alerts are found + fmt.Printf("%v", err) + os.Exit(1) + } + } + return + + // 1 - failed + // 2 - command line usage error + // 128 - or higher, os signal related error exit codes + default: + log.Fatalf("non-zero status returned (%d), failed to run feature tests", sc) + } +} + +func InitializeSuite(tc *godog.TestSuiteContext) { + tc.BeforeSuite(steps.BeforeSuite) +} + +func InitializeScenario(sc *godog.ScenarioContext) { + steps.RegisterSteps(sc) + steps.RegisterHooks(sc) +} diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go deleted file mode 100644 index e1fbb90f3..000000000 --- a/test/e2e/metrics_test.go +++ /dev/null @@ -1,297 +0,0 @@ -// Package e2e contains end-to-end tests to verify that the metrics endpoints -// for both components. Metrics are exported and accessible by authorized users through -// RBAC and ServiceAccount tokens. -// -// These tests perform the following steps: -// 1. Create a ClusterRoleBinding to grant necessary permissions for accessing metrics. -// 2. Generate a ServiceAccount token for authentication. -// 3. Deploy a curl pod to interact with the metrics endpoint. -// 4. Wait for the curl pod to become ready. -// 5. Execute a curl command from the pod to validate the metrics endpoint. -// 6. Clean up all resources created during the test, such as the ClusterRoleBinding and curl pod. 
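Before the removed implementation below, the per-pod scrape URL derivation described in the steps above is worth spelling out: the validate() helper converted each pod IP's dots to dashes and addressed it through the cluster's pod DNS zone. A self-contained restatement of that construction (a direct restatement of the deleted code, not new behavior; the namespace in the comment is an example):

package e2e

import (
	"fmt"
	"strings"
)

// metricsURL restates the URL construction used by the deleted validate()
// helper: pod IP 10.244.0.11 in namespace "ns" on port 8443 becomes
// https://10-244-0-11.ns.pod.cluster.local:8443/metrics
func metricsURL(podIP, componentNamespace string, metricsPort int) string {
	podIPDashes := strings.ReplaceAll(podIP, ".", "-")
	return fmt.Sprintf("https://%s.%s.pod.cluster.local:%d/metrics", podIPDashes, componentNamespace, metricsPort)
}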
-// -//nolint:gosec -package e2e - -import ( - "bytes" - "context" - "fmt" - "io" - "os/exec" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/util/rand" - - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" -) - -// TestOperatorControllerMetricsExportedEndpoint verifies that the metrics endpoint for the operator controller -func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { - client := testutil.FindK8sClient(t) - curlNamespace := createRandomNamespace(t, client) - componentNamespace := getComponentNamespace(t, client, "control-plane=operator-controller-controller-manager") - - config := NewMetricsTestConfig( - client, - curlNamespace, - componentNamespace, - "operator-controller-metrics-reader", - "operator-controller-metrics-binding", - "operator-controller-metrics-reader", - "oper-curl-metrics", - "app.kubernetes.io/name=operator-controller", - operatorControllerMetricsPort, - ) - - config.run(t) -} - -// TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for catalogd -func TestCatalogdMetricsExportedEndpoint(t *testing.T) { - client := testutil.FindK8sClient(t) - curlNamespace := createRandomNamespace(t, client) - componentNamespace := getComponentNamespace(t, client, "control-plane=catalogd-controller-manager") - - config := NewMetricsTestConfig( - client, - curlNamespace, - componentNamespace, - "catalogd-metrics-reader", - "catalogd-metrics-binding", - "catalogd-metrics-reader", - "catalogd-curl-metrics", - "app.kubernetes.io/name=catalogd", - catalogdMetricsPort, - ) - - config.run(t) -} - -// MetricsTestConfig holds the necessary configurations for testing metrics endpoints. -type MetricsTestConfig struct { - client string - namespace string - componentNamespace string - clusterRole string - clusterBinding string - serviceAccount string - curlPodName string - componentSelector string - metricsPort int -} - -// NewMetricsTestConfig initializes a new MetricsTestConfig. 
-func NewMetricsTestConfig(client, namespace, componentNamespace, clusterRole, clusterBinding, serviceAccount, curlPodName, componentSelector string, metricsPort int) *MetricsTestConfig { - return &MetricsTestConfig{ - client: client, - namespace: namespace, - componentNamespace: componentNamespace, - clusterRole: clusterRole, - clusterBinding: clusterBinding, - serviceAccount: serviceAccount, - curlPodName: curlPodName, - componentSelector: componentSelector, - metricsPort: metricsPort, - } -} - -// run will execute all steps of those tests -func (c *MetricsTestConfig) run(t *testing.T) { - defer c.cleanup(t) - - c.createMetricsClusterRoleBinding(t) - token := c.getServiceAccountToken(t) - c.createCurlMetricsPod(t) - c.validate(t, token) -} - -// createMetricsClusterRoleBinding to binding and expose the metrics -func (c *MetricsTestConfig) createMetricsClusterRoleBinding(t *testing.T) { - t.Logf("Creating ClusterRoleBinding %s for %s in namespace %s", c.clusterBinding, c.serviceAccount, c.namespace) - cmd := exec.Command(c.client, "create", "clusterrolebinding", c.clusterBinding, - "--clusterrole="+c.clusterRole, - "--serviceaccount="+c.namespace+":"+c.serviceAccount) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating ClusterRoleBinding: %s", string(output)) -} - -// getServiceAccountToken return the token requires to have access to the metrics -func (c *MetricsTestConfig) getServiceAccountToken(t *testing.T) string { - t.Logf("Creating ServiceAccount %q in namespace %q", c.serviceAccount, c.namespace) - output, err := exec.Command(c.client, "create", "serviceaccount", c.serviceAccount, "--namespace="+c.namespace).CombinedOutput() - require.NoError(t, err, "Error creating service account: %v", string(output)) - - t.Logf("Generating ServiceAccount token for %q in namespace %q", c.serviceAccount, c.namespace) - cmd := exec.Command(c.client, "create", "token", c.serviceAccount, "--namespace", c.namespace) - tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(cmd) - require.NoError(t, err, "Error creating token: %s", string(tokenCombinedOutput)) - return string(bytes.TrimSpace(tokenOutput)) -} - -// createCurlMetricsPod creates the Pod with curl image to allow check if the metrics are working -func (c *MetricsTestConfig) createCurlMetricsPod(t *testing.T) { - t.Logf("Creating curl pod (%s/%s) to validate the metrics endpoint", c.namespace, c.curlPodName) - cmd := exec.Command(c.client, "run", c.curlPodName, - "--image=quay.io/curl/curl:8.15.0", - "--namespace", c.namespace, - "--restart=Never", - "--overrides", `{ - "spec": { - "terminationGradePeriodSeconds": 0, - "containers": [{ - "name": "curl", - "image": "quay.io/curl/curl:8.15.0", - "command": ["sh", "-c", "sleep 3600"], - "securityContext": { - "allowPrivilegeEscalation": false, - "capabilities": {"drop": ["ALL"]}, - "runAsNonRoot": true, - "runAsUser": 1000, - "seccompProfile": {"type": "RuntimeDefault"} - } - }], - "serviceAccountName": "`+c.serviceAccount+`" - } - }`) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating curl pod: %s", string(output)) -} - -// validate verifies if is possible to access the metrics from all pods -func (c *MetricsTestConfig) validate(t *testing.T, token string) { - t.Log("Waiting for the curl pod to be ready") - waitCmd := exec.Command(c.client, "wait", "--for=condition=Ready", "pod", c.curlPodName, "--namespace", c.namespace, "--timeout=60s") - waitOutput, waitErr := waitCmd.CombinedOutput() - require.NoError(t, waitErr, "Error waiting 
for curl pod to be ready: %s", string(waitOutput)) - - // Get all pod IPs for the component - podIPs := c.getComponentPodIPs(t) - require.NotEmpty(t, podIPs, "No pod IPs found for component") - t.Logf("Found %d pod(s) to scrape metrics from", len(podIPs)) - - // Validate metrics endpoint for each pod - for i, podIP := range podIPs { - // Build metrics URL with pod FQDN: ..pod.cluster.local - // Convert IP dots to dashes (e.g., 10.244.0.11 -> 10-244-0-11) - podIPDashes := strings.ReplaceAll(podIP, ".", "-") - metricsURL := fmt.Sprintf("https://%s.%s.pod.cluster.local:%d/metrics", podIPDashes, c.componentNamespace, c.metricsPort) - t.Logf("Validating metrics endpoint for pod %d/%d: %s", i+1, len(podIPs), metricsURL) - - curlCmd := exec.Command(c.client, "exec", c.curlPodName, "--namespace", c.namespace, "--", - "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, metricsURL) - output, err := curlCmd.CombinedOutput() - require.NoError(t, err, "Error calling metrics endpoint %s: %s", metricsURL, string(output)) - require.Contains(t, string(output), "200 OK", "Metrics endpoint %s did not return 200 OK", metricsURL) - t.Logf("Successfully scraped metrics from pod %d/%d", i+1, len(podIPs)) - } -} - -// cleanup removes the created resources. Uses a context with timeout to prevent hangs. -func (c *MetricsTestConfig) cleanup(t *testing.T) { - type objDesc struct { - resourceName string - name string - namespace string - } - objects := []objDesc{ - {"clusterrolebinding", c.clusterBinding, ""}, - {"pod", c.curlPodName, c.namespace}, - {"serviceaccount", c.serviceAccount, c.namespace}, - {"namespace", c.namespace, ""}, - } - - t.Log("Cleaning up resources") - for _, obj := range objects { - args := []string{"delete", obj.resourceName, obj.name, "--ignore-not-found=true", "--force"} - if obj.namespace != "" { - args = append(args, "--namespace", obj.namespace) - } - output, err := exec.Command(c.client, args...).CombinedOutput() - require.NoError(t, err, "Error deleting %q %q in namespace %q: %v", obj.resourceName, obj.name, obj.namespace, string(output)) - } - - // Create a context with a 60-second timeout. - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - for _, obj := range objects { - err := waitForDeletion(ctx, c.client, obj.resourceName, obj.name, obj.namespace) - require.NoError(t, err, "Error deleting %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace) - t.Logf("Successfully deleted %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace) - } -} - -// waitForDeletion uses "kubectl wait" to block until the specified resource is deleted -// or until the 60-second timeout is reached. -func waitForDeletion(ctx context.Context, client, resourceType, resourceName, resourceNamespace string) error { - args := []string{"wait", "--for=delete", "--timeout=60s", resourceType, resourceName} - if resourceNamespace != "" { - args = append(args, "--namespace", resourceNamespace) - } - cmd := exec.CommandContext(ctx, client, args...) 
- output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("error waiting for deletion of %s %s: %v, output: %s", resourceType, resourceName, err, string(output)) - } - return nil -} - -// createRandomNamespace creates a random namespace -func createRandomNamespace(t *testing.T, client string) string { - nsName := fmt.Sprintf("testns-%s", rand.String(8)) - - cmd := exec.Command(client, "create", "namespace", nsName) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating namespace: %s", string(output)) - - return nsName -} - -// getComponentNamespace returns the namespace where operator-controller or catalogd is running -func getComponentNamespace(t *testing.T, client, selector string) string { - cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}") - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error determining namespace: %s", string(output)) - - namespace := string(bytes.TrimSpace(output)) - if namespace == "" { - t.Fatal("No namespace found for selector " + selector) - } - return namespace -} - -// getComponentPodIPs returns the IP addresses of all pods matching the component selector -func (c *MetricsTestConfig) getComponentPodIPs(t *testing.T) []string { - cmd := exec.Command(c.client, "get", "pods", - "--namespace="+c.componentNamespace, - "--selector="+c.componentSelector, - "--output=jsonpath={.items[*].status.podIP}") - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error getting pod IPs: %s", string(output)) - - podIPsStr := string(bytes.TrimSpace(output)) - if podIPsStr == "" { - return []string{} - } - - // Split space-separated IPs - fields := bytes.Fields([]byte(podIPsStr)) - ips := make([]string, len(fields)) - for i, field := range fields { - ips[i] = string(field) - } - return ips -} - -func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) { - var outOnly, outAndErr bytes.Buffer - allWriter := io.MultiWriter(&outOnly, &outAndErr) - cmd.Stdout = allWriter - cmd.Stderr = &outAndErr - err := cmd.Run() - return outOnly.Bytes(), outAndErr.Bytes(), err -} diff --git a/test/e2e/network_policy_test.go b/test/e2e/network_policy_test.go deleted file mode 100644 index 8e0465f41..000000000 --- a/test/e2e/network_policy_test.go +++ /dev/null @@ -1,379 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" -) - -const ( - minJustificationLength = 40 - catalogdManagerSelector = "control-plane=catalogd-controller-manager" - operatorManagerSelector = "control-plane=operator-controller-controller-manager" - catalogdMetricsPort = 7443 - catalogdWebhookPort = 9443 - catalogServerPort = 8443 - operatorControllerMetricsPort = 8443 -) - -type portWithJustification struct { - port []networkingv1.NetworkPolicyPort - justification string -} - -// ingressRule defines a k8s IngressRule, along with a justification. -type ingressRule struct { - ports []portWithJustification - from []networkingv1.NetworkPolicyPeer -} - -// egressRule defines a k8s egressRule, along with a justification. 
-type egressRule struct { - ports []portWithJustification - to []networkingv1.NetworkPolicyPeer -} - -// AllowedPolicyDefinition defines the expected structure and justifications for a NetworkPolicy. -type allowedPolicyDefinition struct { - selector metav1.LabelSelector - policyTypes []networkingv1.PolicyType - ingressRule ingressRule - egressRule egressRule - denyAllIngressJustification string // Justification if Ingress is in PolicyTypes and IngressRules is empty - denyAllEgressJustification string // Justification if Egress is in PolicyTypes and EgressRules is empty -} - -var denyAllPolicySpec = allowedPolicyDefinition{ - selector: metav1.LabelSelector{}, // Empty selector, matches all pods - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - // No IngressRules means deny all ingress if PolicyTypeIngress is present - // No EgressRules means deny all egress if PolicyTypeEgress is present - denyAllIngressJustification: "Denies all ingress traffic to pods selected by this policy by default, unless explicitly allowed by other policy rules, ensuring a baseline secure posture.", - denyAllEgressJustification: "Denies all egress traffic from pods selected by this policy by default, unless explicitly allowed by other policy rules, minimizing potential exfiltration paths.", -} - -var prometheuSpec = allowedPolicyDefinition{ - selector: metav1.LabelSelector{MatchLabels: map[string]string{"app.kubernetes.io/name": "prometheus"}}, - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - ingressRule: ingressRule{ - ports: []portWithJustification{ - { - port: nil, - justification: "Allows access to the prometheus pod", - }, - }, - }, - egressRule: egressRule{ - ports: []portWithJustification{ - { - port: nil, - justification: "Allows prometheus to access other pods", - }, - }, - }, -} - -// Ref: https://docs.google.com/document/d/1bHEEWzA65u-kjJFQRUY1iBuMIIM1HbPy4MeDLX4NI3o/edit?usp=sharing -var allowedNetworkPolicies = map[string]allowedPolicyDefinition{ - "catalogd-controller-manager": { - selector: metav1.LabelSelector{MatchLabels: map[string]string{"control-plane": "catalogd-controller-manager"}}, - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - ingressRule: ingressRule{ - ports: []portWithJustification{ - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogdMetricsPort}}}, - justification: "Allows Prometheus to scrape metrics from catalogd, which is essential for monitoring its performance and health.", - }, - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogdWebhookPort}}}, - justification: "Permits Kubernetes API server to reach catalogd's mutating admission webhook, ensuring integrity of catalog resources.", - }, - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogServerPort}}}, - justification: "Enables clients (eg. 
operator-controller) to query catalog metadata from catalogd, which is a core function for bundle resolution and operator discovery.", - }, - }, - }, - egressRule: egressRule{ - ports: []portWithJustification{ - { - port: nil, // Empty Ports means allow all egress - justification: "Permits catalogd to fetch catalog images from arbitrary container registries and communicate with the Kubernetes API server for its operational needs.", - }, - }, - }, - }, - "operator-controller-controller-manager": { - selector: metav1.LabelSelector{MatchLabels: map[string]string{"control-plane": "operator-controller-controller-manager"}}, - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - ingressRule: ingressRule{ - ports: []portWithJustification{ - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: operatorControllerMetricsPort}}}, - justification: "Allows Prometheus to scrape metrics from operator-controller, which is crucial for monitoring its activity, reconciliations, and overall health.", - }, - }, - }, - egressRule: egressRule{ - ports: []portWithJustification{ - { - port: nil, // Empty Ports means allow all egress - justification: "Enables operator-controller to pull bundle images from arbitrary image registries, connect to catalogd's HTTPS server for metadata, and interact with the Kubernetes API server.", - }, - }, - }, - }, -} - -func TestNetworkPolicyJustifications(t *testing.T) { - ctx := context.Background() - - // Validate justifications have min length in the allowedNetworkPolicies definition - for name, policyDef := range allowedNetworkPolicies { - for i, pwj := range policyDef.ingressRule.ports { - require.GreaterOrEqualf(t, len(pwj.justification), minJustificationLength, - "Justification for ingress PortWithJustification entry %d in policy %q is too short: %q", i, name, pwj.justification) - } - for i, pwj := range policyDef.egressRule.ports { // Corrected variable name from 'rule' to 'pwj' - require.GreaterOrEqualf(t, len(pwj.justification), minJustificationLength, - "Justification for egress PortWithJustification entry %d in policy %q is too short: %q", i, name, pwj.justification) - } - if policyDef.denyAllIngressJustification != "" { - require.GreaterOrEqualf(t, len(policyDef.denyAllIngressJustification), minJustificationLength, - "DenyAllIngressJustification for policy %q is too short: %q", name, policyDef.denyAllIngressJustification) - } - if policyDef.denyAllEgressJustification != "" { - require.GreaterOrEqualf(t, len(policyDef.denyAllEgressJustification), minJustificationLength, - "DenyAllEgressJustification for policy %q is too short: %q", name, policyDef.denyAllEgressJustification) - } - } - - clientForComponent := testutil.FindK8sClient(t) - - operatorControllerNamespace := getComponentNamespace(t, clientForComponent, operatorManagerSelector) - catalogDNamespace := getComponentNamespace(t, clientForComponent, catalogdManagerSelector) - - policies := &networkingv1.NetworkPolicyList{} - err := c.List(ctx, policies, client.InNamespace(operatorControllerNamespace)) - require.NoError(t, err, "Failed to list NetworkPolicies in namespace %q", operatorControllerNamespace) - - clusterPolicies := policies.Items - - if operatorControllerNamespace != catalogDNamespace { - policies := &networkingv1.NetworkPolicyList{} - err := c.List(ctx, policies, client.InNamespace(catalogDNamespace)) - require.NoError(t, err, "Failed to list NetworkPolicies in namespace 
%q", catalogDNamespace) - clusterPolicies = append(clusterPolicies, policies.Items...) - - t.Log("Detected dual-namespace configuration, expecting two prefixed 'default-deny-all-traffic' policies.") - allowedNetworkPolicies["catalogd-default-deny-all-traffic"] = denyAllPolicySpec - allowedNetworkPolicies["operator-controller-default-deny-all-traffic"] = denyAllPolicySpec - } else { - t.Log("Detected single-namespace configuration, expecting one 'default-deny-all-traffic' policy.") - allowedNetworkPolicies["default-deny-all-traffic"] = denyAllPolicySpec - t.Log("Detected single-namespace configuration, expecting 'prometheus' policy.") - allowedNetworkPolicies["prometheus"] = prometheuSpec - } - - validatedRegistryPolicies := make(map[string]bool) - - for _, policy := range clusterPolicies { - t.Run(fmt.Sprintf("Policy_%s", strings.ReplaceAll(policy.Name, "-", "_")), func(t *testing.T) { - expectedPolicy, found := allowedNetworkPolicies[policy.Name] - require.Truef(t, found, "NetworkPolicy %q found in cluster but not in allowed registry. Namespace: %s", policy.Name, policy.Namespace) - validatedRegistryPolicies[policy.Name] = true - - // 1. Compare PodSelector - require.True(t, equality.Semantic.DeepEqual(expectedPolicy.selector, policy.Spec.PodSelector), - "PodSelector mismatch for policy %q. Expected: %+v, Got: %+v", policy.Name, expectedPolicy.selector, policy.Spec.PodSelector) - - // 2. Compare PolicyTypes - require.ElementsMatchf(t, expectedPolicy.policyTypes, policy.Spec.PolicyTypes, - "PolicyTypes mismatch for policy %q.", policy.Name) - - // 3. Validate Ingress Rules - hasIngressPolicyType := false - for _, pt := range policy.Spec.PolicyTypes { - if pt == networkingv1.PolicyTypeIngress { - hasIngressPolicyType = true - break - } - } - - if hasIngressPolicyType { - switch len(policy.Spec.Ingress) { - case 0: - validateDenyAllIngress(t, policy.Name, expectedPolicy) - case 1: - validateSingleIngressRule(t, policy.Name, policy.Spec.Ingress[0], expectedPolicy) - default: - require.Failf(t, "Policy %q in cluster has %d ingress rules. Allowed definition supports at most 1 explicit ingress rule.", policy.Name, len(policy.Spec.Ingress)) - } - } else { - validateNoIngress(t, policy.Name, policy, expectedPolicy) - } - - // 4. Validate Egress Rules - hasEgressPolicyType := false - for _, pt := range policy.Spec.PolicyTypes { - if pt == networkingv1.PolicyTypeEgress { - hasEgressPolicyType = true - break - } - } - - if hasEgressPolicyType { - switch len(policy.Spec.Egress) { - case 0: - validateDenyAllEgress(t, policy.Name, expectedPolicy) - case 1: - validateSingleEgressRule(t, policy.Name, policy.Spec.Egress[0], expectedPolicy) - default: - require.Failf(t, "Policy %q in cluster has %d egress rules. Allowed definition supports at most 1 explicit egress rule.", policy.Name, len(policy.Spec.Egress)) - } - } else { - validateNoEgress(t, policy, expectedPolicy) - } - }) - } - - // 5. Ensure all policies in the registry were found in the cluster - require.Len(t, validatedRegistryPolicies, len(allowedNetworkPolicies), - "Mismatch between number of expected policies in registry (%d) and number of policies found & validated in cluster (%d). 
Missing policies from registry: %v", len(allowedNetworkPolicies), len(validatedRegistryPolicies), missingPolicies(allowedNetworkPolicies, validatedRegistryPolicies)) -} - -func missingPolicies(expected map[string]allowedPolicyDefinition, actual map[string]bool) []string { - missing := []string{} - for k := range expected { - if !actual[k] { - missing = append(missing, k) - } - } - return missing -} - -// validateNoEgress confirms that a policy which does not have spec.PolicyType=Egress specified -// has no corresponding egress rules or expectations defined. -func validateNoEgress(t *testing.T, policy networkingv1.NetworkPolicy, expectedPolicy allowedPolicyDefinition) { - // Policy is NOT expected to affect Egress traffic (no Egress in PolicyTypes) - // Expected: Cluster has no egress rules; Registry has no DenyAllEgressJustification and empty EgressRule. - require.Emptyf(t, policy.Spec.Egress, - "Policy %q: Cluster does not have Egress PolicyType, but has Egress rules defined.", policy.Name) - require.Emptyf(t, expectedPolicy.denyAllEgressJustification, - "Policy %q: Cluster does not have Egress PolicyType. Registry's DenyAllEgressJustification is not empty.", policy.Name) - require.Emptyf(t, expectedPolicy.egressRule.ports, - "Policy %q: Cluster does not have Egress PolicyType. Registry's EgressRule.Ports is not empty.", policy.Name) - require.Emptyf(t, expectedPolicy.egressRule.to, - "Policy %q: Cluster does not have Egress PolicyType. Registry's EgressRule.To is not empty.", policy.Name) -} - -// validateDenyAllEgress confirms that a policy with Egress PolicyType but no explicit rules -// correctly corresponds to a "deny all" expectation. -func validateDenyAllEgress(t *testing.T, policyName string, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Egress is present, but no explicit egress rules -> Deny All Egress by this policy. - // Expected: DenyAllEgressJustification is set; EgressRule.Ports and .To are empty. - require.NotEmptyf(t, expectedPolicy.denyAllEgressJustification, - "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's DenyAllEgressJustification is empty.", policyName) - require.Emptyf(t, expectedPolicy.egressRule.ports, - "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's EgressRule.Ports is not empty.", policyName) - require.Emptyf(t, expectedPolicy.egressRule.to, - "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's EgressRule.To is not empty.", policyName) -} - -// validateSingleEgressRule validates a policy that has exactly one explicit egress rule, -// distinguishing between "allow-all" and more specific rules. -func validateSingleEgressRule(t *testing.T, policyName string, clusterEgressRule networkingv1.NetworkPolicyEgressRule, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Egress is present, and there's one explicit egress rule. - // Expected: DenyAllEgressJustification is empty; EgressRule matches the cluster's rule. - expectedEgressRule := expectedPolicy.egressRule - - require.Emptyf(t, expectedPolicy.denyAllEgressJustification, - "Policy %q: Cluster has a specific Egress rule. 
Registry's DenyAllEgressJustification should be empty.", policyName) - - isClusterRuleAllowAllPorts := len(clusterEgressRule.Ports) == 0 - isClusterRuleAllowAllPeers := len(clusterEgressRule.To) == 0 - - if isClusterRuleAllowAllPorts && isClusterRuleAllowAllPeers { // Handles egress: [{}] - allow all ports to all peers - require.Lenf(t, expectedEgressRule.ports, 1, - "Policy %q (allow-all egress): Expected EgressRule.Ports to have 1 justification entry, got %d", policyName, len(expectedEgressRule.ports)) - if len(expectedEgressRule.ports) == 1 { // Guard against panic - require.Nilf(t, expectedEgressRule.ports[0].port, - "Policy %q (allow-all egress): Expected EgressRule.Ports[0].Port to be nil, got %+v", policyName, expectedEgressRule.ports[0].port) - } - require.Conditionf(t, func() bool { return len(expectedEgressRule.to) == 0 }, - "Policy %q (allow-all egress): Expected EgressRule.To to be empty for allow-all peers, got %+v", policyName, expectedEgressRule.to) - } else { - // Specific egress rule (not the simple allow-all ports and allow-all peers) - require.True(t, equality.Semantic.DeepEqual(expectedEgressRule.to, clusterEgressRule.To), - "Policy %q, Egress Rule: 'To' mismatch.\nExpected: %+v\nGot: %+v", policyName, expectedEgressRule.to, clusterEgressRule.To) - - var allExpectedPortsFromPwJ []networkingv1.NetworkPolicyPort - for _, pwj := range expectedEgressRule.ports { - allExpectedPortsFromPwJ = append(allExpectedPortsFromPwJ, pwj.port...) - } - require.ElementsMatchf(t, allExpectedPortsFromPwJ, clusterEgressRule.Ports, - "Policy %q, Egress Rule: 'Ports' mismatch (aggregated from PortWithJustification). Expected: %+v, Got: %+v", policyName, allExpectedPortsFromPwJ, clusterEgressRule.Ports) - } -} - -// validateNoIngress confirms that a policy which does not have the Ingress PolicyType -// has no corresponding ingress rules or expectations defined. -func validateNoIngress(t *testing.T, policyName string, clusterPolicy networkingv1.NetworkPolicy, expectedPolicy allowedPolicyDefinition) { - // Policy is NOT expected to affect Ingress traffic (no Ingress in PolicyTypes) - // Expected: Cluster has no ingress rules; Registry has no DenyAllIngressJustification and empty IngressRule. - require.Emptyf(t, clusterPolicy.Spec.Ingress, - "Policy %q: Cluster does not have Ingress PolicyType, but has Ingress rules defined.", policyName) - require.Emptyf(t, expectedPolicy.denyAllIngressJustification, - "Policy %q: Cluster does not have Ingress PolicyType. Registry's DenyAllIngressJustification is not empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.ports, - "Policy %q: Cluster does not have Ingress PolicyType. Registry's IngressRule.Ports is not empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.from, - "Policy %q: Cluster does not have Ingress PolicyType. Registry's IngressRule.From is not empty.", policyName) -} - -// validateDenyAllIngress confirms that a policy with Ingress PolicyType but no explicit rules -// correctly corresponds to a "deny all" expectation. -func validateDenyAllIngress(t *testing.T, policyName string, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Ingress is present, but no explicit ingress rules -> Deny All Ingress by this policy. - // Expected: DenyAllIngressJustification is set; IngressRule.Ports and .From are empty. - require.NotEmptyf(t, expectedPolicy.denyAllIngressJustification, - "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). 
Registry's DenyAllIngressJustification is empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.ports, - "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). Registry's IngressRule.Ports is not empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.from, - "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). Registry's IngressRule.From is not empty.", policyName) -} - -// validateSingleIngressRule validates a policy that has exactly one explicit ingress rule. -func validateSingleIngressRule(t *testing.T, policyName string, clusterIngressRule networkingv1.NetworkPolicyIngressRule, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Ingress is present, and there's one explicit ingress rule. - // Expected: DenyAllIngressJustification is empty; IngressRule matches the cluster's rule. - expectedIngressRule := expectedPolicy.ingressRule - - require.Emptyf(t, expectedPolicy.denyAllIngressJustification, - "Policy %q: Cluster has a specific Ingress rule. Registry's DenyAllIngressJustification should be empty.", policyName) - - // Compare 'From' - require.True(t, equality.Semantic.DeepEqual(expectedIngressRule.from, clusterIngressRule.From), - "Policy %q, Ingress Rule: 'From' mismatch.\nExpected: %+v\nGot: %+v", policyName, expectedIngressRule.from, clusterIngressRule.From) - - // Compare 'Ports' by aggregating the ports from our justified structure - var allExpectedPortsFromPwJ []networkingv1.NetworkPolicyPort - for _, pwj := range expectedIngressRule.ports { - allExpectedPortsFromPwJ = append(allExpectedPortsFromPwJ, pwj.port...) - } - require.ElementsMatchf(t, allExpectedPortsFromPwJ, clusterIngressRule.Ports, - "Policy %q, Ingress Rule: 'Ports' mismatch (aggregated from PortWithJustification). Expected: %+v, Got: %+v", policyName, allExpectedPortsFromPwJ, clusterIngressRule.Ports) -} diff --git a/test/e2e/single_namespace_support_test.go b/test/e2e/single_namespace_support_test.go deleted file mode 100644 index 190e786ba..000000000 --- a/test/e2e/single_namespace_support_test.go +++ /dev/null @@ -1,412 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" - . 
"github.com/operator-framework/operator-controller/test/helpers" -) - -const ( - soNsFlag = "SingleOwnNamespaceInstallSupport" -) - -func TestClusterExtensionSingleNamespaceSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("Test support for cluster extension config") - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating install namespace, watch namespace and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - watchNamespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-target", - }, - } - require.NoError(t, c.Create(t.Context(), &watchNamespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &watchNamespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), &serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the test-catalog ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By attempting to install the single-namespace-operator ClusterExtension without any configuration") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "single-namespace-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - 
Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for single-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`) - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, watchNamespace.GetName())), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for single-namespace-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By ensuring the single-namespace-operator deployment is correctly configured to watch the watch namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "single-namespace-operator"}, deployment)) - require.NotNil(ct, deployment.Spec.Template.GetAnnotations()) - require.Equal(ct, watchNamespace.GetName(), deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"]) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionOwnNamespaceSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("Test support for cluster extension with OwnNamespace install mode support") - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating install namespace, watch namespace and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), 
&serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the test-catalog ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By attempting to install the own-namespace-operator ClusterExtension without any configuration") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "own-namespace-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for own-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`) - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace other than the install namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = 
&ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(`{"watchNamespace": "some-namespace"}`), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for own-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, "invalid ClusterExtension configuration") - require.Contains(ct, cond.Message, fmt.Sprintf("watchNamespace must be \"%s\"", clusterExtension.Spec.Namespace)) - require.Contains(ct, cond.Message, "OwnNamespace install mode") - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace = install namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, clusterExtension.Spec.Namespace)), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for own-namespace-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By ensuring the own-namespace-operator deployment is correctly configured to watch the watch namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "own-namespace-operator"}, deployment)) - require.NotNil(ct, deployment.Spec.Template.GetAnnotations()) - require.Equal(ct, clusterExtension.Spec.Namespace, deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"]) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionVersionUpdate(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: 
"test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It allows to upgrade the ClusterExtension to a non-successor version") - t.Log("By forcing update of ClusterExtension resource to a non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a satisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - t.Log("We should have two ClusterExtensionRevision resources") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - cerList := &ocv1.ClusterExtensionRevisionList{} - require.NoError(ct, c.List(context.Background(), cerList)) - require.Len(ct, cerList.Items, 2) - }, pollDuration, pollInterval) -} diff --git a/test/e2e/steps/hooks.go b/test/e2e/steps/hooks.go new file mode 100644 index 000000000..ad47d5102 --- /dev/null +++ b/test/e2e/steps/hooks.go @@ -0,0 +1,166 @@ +package steps + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + + "github.com/cucumber/godog" + "github.com/go-logr/logr" + "github.com/spf13/pflag" + "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/component-base/featuregate" + "k8s.io/klog/v2/textlogger" + + "github.com/operator-framework/operator-controller/internal/operator-controller/features" +) + +type resource struct { + name string + kind string +} + +type scenarioContext struct { + id string + namespace string + clusterExtensionName string + removedResources []unstructured.Unstructured + backGroundCmds []*exec.Cmd + metricsResponse map[string]string +} + +type contextKey string + +const ( + scenarioContextKey contextKey = "scenario-context" +) + +var ( + devMode = false + featureGates = map[featuregate.Feature]bool{ + features.WebhookProviderCertManager: true, + features.PreflightPermissions: false, + features.SingleOwnNamespaceInstallSupport: false, + features.SyntheticPermissions: false, + features.WebhookProviderOpenshiftServiceCA: false, + features.HelmChartSupport: false, + features.BoxcutterRuntime: false, + } + logger logr.Logger +) + +func init() { + flagSet := pflag.CommandLine + flagSet.BoolVar(&devMode, "log.debug", false, "print debug log level") +} + +func RegisterHooks(sc *godog.ScenarioContext) { + sc.Before(CheckFeatureTags) + 
sc.Before(CreateScenarioContext) + + sc.After(ScenarioCleanup) +} + +func BeforeSuite() { + if devMode { + logger = textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(1))) + } else { + logger = textlogger.NewLogger(textlogger.NewConfig()) + } + + raw, err := k8sClient("get", "deployments", "-A", "-l", "app.kubernetes.io/part-of=olm", "-o", "jsonpath={.items}") + if err != nil { + panic(fmt.Errorf("failed to get OLM deployments: %v", err)) + } + dl := []v1.Deployment{} + if err := json.Unmarshal([]byte(raw), &dl); err != nil { + panic(fmt.Errorf("failed to unmarshal OLM deployments: %v", err)) + } + var olm *v1.Deployment + + for _, d := range dl { + if d.Name == olmDeploymentName { + olm = &d + olmNamespace = d.Namespace + break + } + } + + featureGatePattern := regexp.MustCompile(`--feature-gates=([[:alnum:]]+)=(true|false)`) + for _, c := range olm.Spec.Template.Spec.Containers { + if c.Name == "manager" { + for _, arg := range c.Args { + if matches := featureGatePattern.FindStringSubmatch(arg); matches != nil { + v, err := strconv.ParseBool(matches[2]) + if err != nil { + panic(fmt.Errorf("failed to parse feature gate %q=%q: %v", matches[1], matches[2], err)) + } + featureGates[featuregate.Feature(matches[1])] = v + } + } + } + } + logger.Info(fmt.Sprintf("Enabled feature gates: %v", featureGates)) +} + +func CheckFeatureTags(ctx context.Context, sc *godog.Scenario) (context.Context, error) { + for _, tag := range sc.Tags { + if enabled, found := featureGates[featuregate.Feature(tag.Name[1:])]; found && !enabled { + logger.Info(fmt.Sprintf("Skipping scenario %q because feature gate %q is disabled", sc.Name, tag.Name[1:])) + return ctx, godog.ErrSkip + } + } + return ctx, nil +} + +func CreateScenarioContext(ctx context.Context, sc *godog.Scenario) (context.Context, error) { + scCtx := &scenarioContext{ + id: sc.Id, + namespace: fmt.Sprintf("ns-%s", sc.Id), + clusterExtensionName: fmt.Sprintf("ce-%s", sc.Id), + } + return context.WithValue(ctx, scenarioContextKey, scCtx), nil +} + +func scenarioCtx(ctx context.Context) *scenarioContext { + return ctx.Value(scenarioContextKey).(*scenarioContext) +} + +func stderrOutput(err error) string { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) && exitErr != nil { + return string(exitErr.Stderr) + } + return "" +} + +func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context.Context, error) { + sc := scenarioCtx(ctx) + for _, bgCmd := range sc.backGroundCmds { + if p := bgCmd.Process; p != nil { + _ = p.Kill() + } + } + if err != nil { + return ctx, err + } + + forDeletion := []resource{} + if sc.clusterExtensionName != "" { + forDeletion = append(forDeletion, resource{name: sc.clusterExtensionName, kind: "clusterextension"}) + } + forDeletion = append(forDeletion, resource{name: sc.namespace, kind: "namespace"}) + go func() { + for _, r := range forDeletion { + if _, err := k8sClient("delete", r.kind, r.name, "--ignore-not-found=true"); err != nil { + logger.Info("Error deleting resource", "name", r.name, "namespace", sc.namespace, "stderr", stderrOutput(err)) + } + } + }() + return ctx, nil +} diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go new file mode 100644 index 000000000..7beb17752 --- /dev/null +++ b/test/e2e/steps/steps.go @@ -0,0 +1,728 @@ +package steps + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/cucumber/godog" + jsonpatch 
"github.com/evanphx/json-patch" + "github.com/google/go-cmp/cmp" + "github.com/google/go-containerregistry/pkg/crane" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "github.com/spf13/pflag" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" + "sigs.k8s.io/yaml" + + ocv1 "github.com/operator-framework/operator-controller/api/v1" +) + +const ( + olmDeploymentName = "operator-controller-controller-manager" + timeout = 5 * time.Minute + tick = 1 * time.Second +) + +var ( + olmNamespace = "olmv1-system" + kubeconfigPath string + k8sCli string +) + +func RegisterSteps(sc *godog.ScenarioContext) { + sc.Step(`^OLM is available$`, OLMisAvailable) + sc.Step(`^(?i)bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled) + + sc.Step(`^(?i)ClusterExtension is applied(?:\s+.*)?$`, ResourceIsApplied) + sc.Step(`^(?i)ClusterExtension is updated to version "([^"]+)"$`, ClusterExtensionVersionUpdate) + sc.Step(`^(?i)ClusterExtension is updated(?:\s+.*)?$`, ResourceIsApplied) + sc.Step(`^(?i)ClusterExtension is available$`, ClusterExtensionIsAvailable) + sc.Step(`^(?i)ClusterExtension is rolled out$`, ClusterExtensionIsRolledOut) + sc.Step(`^(?i)ClusterExtension reports "([^"]+)" as active revision(s?)$`, ClusterExtensionReportsActiveRevisions) + sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message:$`, ClusterExtensionReportsCondition) + sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutMsg) + sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutReason) + sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionRevisionReportsConditionWithoutMsg) + sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" is archived$`, ClusterExtensionRevisionIsArchived) + + sc.Step(`^(?i)resource "([^"]+)" is installed$`, ResourceAvailable) + sc.Step(`^(?i)resource "([^"]+)" is available$`, ResourceAvailable) + sc.Step(`^(?i)resource "([^"]+)" is removed$`, ResourceRemoved) + sc.Step(`^(?i)resource "([^"]+)" exists$`, ResourceAvailable) + sc.Step(`^(?i)resource is applied$`, ResourceIsApplied) + sc.Step(`^(?i)resource "deployment/test-operator" reports as (not ready|ready)$`, MarkTestOperatorNotReady) + + sc.Step(`^(?i)resource apply fails with error msg containing "([^"]+)"$`, ResourceApplyFails) + sc.Step(`^(?i)resource "([^"]+)" is eventually restored$`, ResourceRestored) + sc.Step(`^(?i)resource "([^"]+)" matches$`, ResourceMatches) + + sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in test namespace$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace) + sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in \${TEST_NAMESPACE}$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace) + sc.Step(`^(?i)ServiceAccount "([^"]*)" in test namespace is cluster admin$`, ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace) + sc.Step(`^(?i)ServiceAccount "([^"]+)" in test namespace has permissions to fetch "([^"]+)" metrics$`, ServiceAccountWithFetchMetricsPermissions) + sc.Step(`^(?i)ServiceAccount "([^"]+)" sends request to "([^"]+)" endpoint of "([^"]+)" service$`, 
SendMetricsRequest) + + sc.Step(`^"([^"]+)" catalog is updated to version "([^"]+)"$`, CatalogIsUpdatedToVersion) + sc.Step(`^(?i)ClusterCatalog "([^"]+)" is updated to version "([^"]+)"$`, CatalogIsUpdatedToVersion) + sc.Step(`^"([^"]+)" catalog serves bundles$`, CatalogServesBundles) + sc.Step(`^(?i)ClusterCatalog "([^"]+)" serves bundles$`, CatalogServesBundles) + sc.Step(`^"([^"]+)" catalog image version "([^"]+)" is also tagged as "([^"]+)"$`, TagCatalogImage) + sc.Step(`^(?i)ClusterCatalog "([^"]+)" image version "([^"]+)" is also tagged as "([^"]+)"$`, TagCatalogImage) + + sc.Step(`^(?i)operator "([^"]+)" target namespace is "([^"]+)"$`, OperatorTargetNamespace) + sc.Step(`^(?i)Prometheus metrics are returned in the response$`, PrometheusMetricsAreReturned) +} + +func init() { + flagSet := pflag.CommandLine + flagSet.StringVar(&k8sCli, "k8s.cli", "kubectl", "Path to k8s cli") + if v, found := os.LookupEnv("KUBECONFIG"); found { + kubeconfigPath = v + } else { + home, err := os.UserHomeDir() + if err != nil { + panic(fmt.Sprintf("cannot determine user home directory: %v", err)) + } + flagSet.StringVar(&kubeconfigPath, "kubeconfig", filepath.Join(home, ".kube", "config"), "Paths to a kubeconfig. Only required if out-of-cluster.") + } +} + +func k8sClient(args ...string) (string, error) { + cmd := exec.Command(k8sCli, args...) + logger.V(1).Info("Running", "command", strings.Join(cmd.Args, " ")) + cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + b, err := cmd.Output() + if err != nil { + logger.V(1).Info("Failed to run", "command", strings.Join(cmd.Args, " "), "stderr", stderrOutput(err), "error", err) + } + output := string(b) + logger.V(1).Info("Output", "command", strings.Join(cmd.Args, " "), "output", output) + return output, err +} + +func k8scliWithInput(yaml string, args ...string) (string, error) { + cmd := exec.Command(k8sCli, args...) 
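`k8scliWithInput` feeds rendered manifests to the CLI on stdin, so no temporary files are needed: `-f -` tells `kubectl apply` to read from standard input. The same idea as a stand-alone sketch (the `kubectl` path and manifest are placeholders):

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

func main() {
	// Placeholder manifest; in the step code this is the substituted template.
	manifest := "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: demo-ns\n"

	// "-f -" makes kubectl read the manifest from stdin.
	cmd := exec.Command("kubectl", "apply", "-f", "-")
	cmd.Stdin = bytes.NewBufferString(manifest)

	out, err := cmd.CombinedOutput()
	fmt.Printf("err=%v output=%s\n", err, out)
}
```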
+ cmd.Stdin = bytes.NewBufferString(yaml) + cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + b, err := cmd.Output() + return string(b), err +} + +func OLMisAvailable(ctx context.Context) error { + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", "deployment", "-n", olmNamespace, olmDeploymentName, "-o", "jsonpath='{.status.conditions[?(@.type==\"Available\")].status}'") + if err != nil { + return false + } + return v == "'True'" + }, timeout, tick) + return nil +} + +func BundleInstalled(ctx context.Context, name, version string) error { + sc := scenarioCtx(ctx) + waitFor(ctx, func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}") + if err != nil { + return false + } + var bundle map[string]interface{} + if err := json.Unmarshal([]byte(v), &bundle); err != nil { + return false + } + return bundle["name"] == name && bundle["version"] == version + }) + return nil +} + +func toUnstructured(yamlContent string) (*unstructured.Unstructured, error) { + var u map[string]any + if err := yaml.Unmarshal([]byte(yamlContent), &u); err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: u}, nil +} + +func substituteScenarioVars(content string, sc *scenarioContext) string { + vars := map[string]string{ + "TEST_NAMESPACE": sc.namespace, + "NAME": sc.clusterExtensionName, + "CATALOG_IMG": "docker-registry.operator-controller-e2e.svc.cluster.local:5000/e2e/test-catalog:v1", + } + if v, found := os.LookupEnv("CATALOG_IMG"); found { + vars["CATALOG_IMG"] = v + } + m := func(k string) string { + if v, found := vars[k]; found { + return v + } + return "" + } + return os.Expand(content, m) +} + +func ResourceApplyFails(ctx context.Context, errMsg string, yamlTemplate *godog.DocString) error { + sc := scenarioCtx(ctx) + yamlContent := substituteScenarioVars(yamlTemplate.Content, sc) + _, err := toUnstructured(yamlContent) + if err != nil { + return fmt.Errorf("failed to parse resource yaml: %v", err) + } + waitFor(ctx, func() bool { + _, err := k8scliWithInput(yamlContent, "apply", "-f", "-") + if err == nil { + return false + } + if stdErr := stderrOutput(err); !strings.Contains(stdErr, errMsg) { + return false + } + return true + }) + return nil +} + +func ClusterExtensionVersionUpdate(ctx context.Context, version string) error { + sc := scenarioCtx(ctx) + patch := map[string]any{ + "spec": map[string]any{ + "source": map[string]any{ + "catalog": map[string]any{ + "version": version, + }, + }, + }, + } + pb, err := json.Marshal(patch) + if err != nil { + return err + } + _, err = k8sClient("patch", "clusterextension", sc.clusterExtensionName, "--type", "merge", "-p", string(pb)) + return err +} + +func ResourceIsApplied(ctx context.Context, yamlTemplate *godog.DocString) error { + sc := scenarioCtx(ctx) + yamlContent := substituteScenarioVars(yamlTemplate.Content, sc) + res, err := toUnstructured(yamlContent) + if err != nil { + return fmt.Errorf("failed to parse resource yaml: %v", err) + } + out, err := k8scliWithInput(yamlContent, "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply resource %v %w", out, err) + } + if res.GetKind() == "ClusterExtension" { + sc.clusterExtensionName = res.GetName() + } + return nil +} + +func ClusterExtensionIsAvailable(ctx context.Context) error { + sc := scenarioCtx(ctx) + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", 
"jsonpath={.status.conditions[?(@.type==\"Installed\")].status}") + if err != nil { + return false + } + return v == "True" + }, timeout, tick) + return nil +} + +func ClusterExtensionIsRolledOut(ctx context.Context) error { + sc := scenarioCtx(ctx) + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.conditions[?(@.type==\"Progressing\")]}") + if err != nil { + return false + } + + var condition map[string]interface{} + if err := json.Unmarshal([]byte(v), &condition); err != nil { + return false + } + return condition["status"] == "True" && condition["reason"] == "Succeeded" && condition["type"] == "Progressing" + }, timeout, tick) + return nil +} + +func waitFor(ctx context.Context, conditionFn func() bool) { + require.Eventually(godog.T(ctx), conditionFn, timeout, tick) +} + +func waitForCondition(ctx context.Context, resourceType, resourceName, conditionType, conditionStatus string, conditionReason *string, msg *string) error { + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", resourceType, resourceName, "-o", fmt.Sprintf("jsonpath={.status.conditions[?(@.type==\"%s\")]}", conditionType)) + if err != nil { + return false + } + + var condition map[string]interface{} + if err := json.Unmarshal([]byte(v), &condition); err != nil { + return false + } + if condition["status"] != conditionStatus { + return false + } + if conditionReason != nil && condition["reason"] != *conditionReason { + return false + } + if msg != nil && condition["message"] != *msg { + return false + } + + return true + }, timeout, tick) + return nil +} + +func waitForExtensionCondition(ctx context.Context, conditionType, conditionStatus string, conditionReason *string, msg *string) error { + sc := scenarioCtx(ctx) + return waitForCondition(ctx, "clusterextension", sc.clusterExtensionName, conditionType, conditionStatus, conditionReason, msg) +} + +func ClusterExtensionReportsCondition(ctx context.Context, conditionType, conditionStatus, conditionReason string, msg *godog.DocString) error { + var conditionMsg *string + if msg != nil { + conditionMsg = ptr.To(substituteScenarioVars(strings.Join(strings.Fields(msg.Content), " "), scenarioCtx(ctx))) + } + return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, conditionMsg) +} + +func ClusterExtensionReportsConditionWithoutMsg(ctx context.Context, conditionType, conditionStatus, conditionReason string) error { + return ClusterExtensionReportsCondition(ctx, conditionType, conditionStatus, conditionReason, nil) +} + +func ClusterExtensionReportsConditionWithoutReason(ctx context.Context, conditionType, conditionStatus string) error { + return waitForExtensionCondition(ctx, conditionType, conditionStatus, nil, nil) +} + +func ClusterExtensionReportsActiveRevisions(ctx context.Context, rawRevisionNames string) error { + sc := scenarioCtx(ctx) + expectedRevisionNames := sets.New[string]() + for _, rev := range strings.Split(rawRevisionNames, ",") { + expectedRevisionNames.Insert(substituteScenarioVars(strings.TrimSpace(rev), sc)) + } + + waitFor(ctx, func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.activeRevisions}") + if err != nil { + return false + } + var activeRevisions []ocv1.RevisionStatus + if err := json.Unmarshal([]byte(v), &activeRevisions); err != nil { + return false + } + activeRevisionsNames := sets.New[string]() + for _, rev := range 
activeRevisions { + activeRevisionsNames.Insert(rev.Name) + } + return activeRevisionsNames.Equal(expectedRevisionNames) + }) + return nil +} + +func ClusterExtensionRevisionReportsConditionWithoutMsg(ctx context.Context, revisionName, conditionType, conditionStatus, conditionReason string) error { + return waitForCondition(ctx, "clusterextensionrevision", substituteScenarioVars(revisionName, scenarioCtx(ctx)), conditionType, conditionStatus, &conditionReason, nil) +} + +func ClusterExtensionRevisionIsArchived(ctx context.Context, revisionName string) error { + return waitForCondition(ctx, "clusterextensionrevision", substituteScenarioVars(revisionName, scenarioCtx(ctx)), "Progressing", "False", ptr.To("Archived"), nil) +} + +func ResourceAvailable(ctx context.Context, resource string) error { + sc := scenarioCtx(ctx) + resource = substituteScenarioVars(resource, sc) + rtype, name, found := strings.Cut(resource, "/") + if !found { + return fmt.Errorf("resource %s is not in the format <type>/<name>", resource) + } + waitFor(ctx, func() bool { + _, err := k8sClient("get", rtype, name, "-n", sc.namespace) + return err == nil + }) + return nil +} + +func ResourceRemoved(ctx context.Context, resource string) error { + sc := scenarioCtx(ctx) + rtype, name, found := strings.Cut(resource, "/") + if !found { + return fmt.Errorf("resource %s is not in the format <type>/<name>", resource) + } + yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml") + if err != nil { + return err + } + obj, err := toUnstructured(yaml) + if err != nil { + return err + } + sc.removedResources = append(sc.removedResources, *obj) + _, err = k8sClient("delete", rtype, name, "-n", sc.namespace) + return err +} + +func ResourceMatches(ctx context.Context, resource string, requiredContentTemplate *godog.DocString) error { + sc := scenarioCtx(ctx) + resource = substituteScenarioVars(resource, sc) + rtype, name, found := strings.Cut(resource, "/") + if !found { + return fmt.Errorf("resource %s is not in the format <type>/<name>", resource) + } + requiredContent, err := toUnstructured(substituteScenarioVars(requiredContentTemplate.Content, sc)) + if err != nil { + return fmt.Errorf("failed to parse required resource yaml: %v", err) + } + waitFor(ctx, func() bool { + objJson, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "json") + if err != nil { + return false + } + obj, err := toUnstructured(objJson) + if err != nil { + return false + } + patch, err := json.Marshal(requiredContent.Object) + if err != nil { + return false + } + updJson, err := jsonpatch.MergePatch([]byte(objJson), patch) + if err != nil { + return false + } + upd, err := toUnstructured(string(updJson)) + if err != nil { + return false + } + + return len(cmp.Diff(upd.Object, obj.Object)) == 0 + }) + return nil +} + +func ResourceRestored(ctx context.Context, resource string) error { + sc := scenarioCtx(ctx) + rtype, name, found := strings.Cut(resource, "/") + if !found { + return fmt.Errorf("resource %s is not in the format <type>/<name>", resource) + } + waitFor(ctx, func() bool { + yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml") + if err != nil { + return false + } + obj, err := toUnstructured(yaml) + if err != nil { + return false + } + ct := obj.GetCreationTimestamp() + + for i, removed := range sc.removedResources { + rct := removed.GetCreationTimestamp() + if removed.GetName() == obj.GetName() && removed.GetKind() == obj.GetKind() && rct.Before(&ct) { + switch rtype { + case "configmap": + if !reflect.DeepEqual(removed.Object["data"], 
obj.Object["data"]) { + return false + } + default: + if !reflect.DeepEqual(removed.Object["spec"], obj.Object["spec"]) { + return false + } + } + sc.removedResources = append(sc.removedResources[:i], sc.removedResources[i+1:]...) + return true + } + } + return false + }) + return nil +} + +func applyPermissionsToServiceAccount(ctx context.Context, serviceAccount, rbacTemplate string, keyValue ...string) error { + sc := scenarioCtx(ctx) + yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", rbacTemplate)) + if err != nil { + return fmt.Errorf("failed to read RBAC template yaml: %v", err) + } + + vars := map[string]string{ + "TEST_NAMESPACE": sc.namespace, + "SERVICE_ACCOUNT_NAME": serviceAccount, + "SERVICEACCOUNT_NAME": serviceAccount, + "CLUSTER_EXTENSION_NAME": sc.clusterExtensionName, + "CLUSTEREXTENSION_NAME": sc.clusterExtensionName, + } + if len(keyValue) > 0 { + for i := 0; i < len(keyValue); i += 2 { + vars[keyValue[i]] = keyValue[i+1] + } + } + m := func(k string) string { + if v, found := vars[k]; found { + return v + } + return "" + } + + // Replace template variables + yaml := os.Expand(string(yamlContent), m) + + // Apply the RBAC configuration + _, err = k8scliWithInput(yaml, "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply RBAC configuration: %v: %s", err, stderrOutput(err)) + } + + return nil +} + +func ServiceAccountWithNeededPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error { + return applyPermissionsToServiceAccount(ctx, serviceAccount, "rbac-template.yaml") +} + +func ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error { + return applyPermissionsToServiceAccount(ctx, serviceAccount, "cluster-admin-rbac-template.yaml") +} + +func ServiceAccountWithFetchMetricsPermissions(ctx context.Context, serviceAccount string, controllerName string) error { + return applyPermissionsToServiceAccount(ctx, serviceAccount, "metrics-reader-rbac-template.yaml", "CONTROLLER_NAME", controllerName) +} + +func httpGet(url string, token string) (*http.Response, error) { + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // we don't care about the certificate + } + client := &http.Client{Transport: tr} + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+token) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + return resp, nil +} + +func randomAvailablePort() (int, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} + +func SendMetricsRequest(ctx context.Context, serviceAccount string, endpoint string, controllerName string) error { + sc := scenarioCtx(ctx) + v, err := k8sClient("get", "service", "-n", olmNamespace, fmt.Sprintf("%s-service", controllerName), "-o", "json") + if err != nil { + return err + } + var service corev1.Service + if err := json.Unmarshal([]byte(v), &service); err != nil { + return err + } + podNameCmd := []string{"get", "pod", "-n", olmNamespace, "-o", "jsonpath={.items}"} + for k, v := range service.Spec.Selector { + podNameCmd = append(podNameCmd, fmt.Sprintf("--selector=%s=%s", k, v)) + } + v, err = k8sClient(podNameCmd...) 
+ if err != nil { + return err + } + + var pods []corev1.Pod + if err := json.Unmarshal([]byte(v), &pods); err != nil { + return err + } + token, err := k8sClient("create", "token", serviceAccount, "-n", sc.namespace) + if err != nil { + return err + } + var metricsPort int32 + for _, p := range service.Spec.Ports { + if p.Name == "metrics" { + metricsPort = p.Port + break + } + } + sc.metricsResponse = make(map[string]string) + for _, p := range pods { + port, err := randomAvailablePort() + if err != nil { + return err + } + portForwardCmd := exec.Command(k8sCli, "port-forward", "-n", p.Namespace, fmt.Sprintf("pod/%s", p.Name), fmt.Sprintf("%d:%d", port, metricsPort)) //nolint:gosec // perfectly safe to start port-forwarder for provided controller name + logger.V(1).Info("starting port-forward", "command", strings.Join(portForwardCmd.Args, " ")) + if err := portForwardCmd.Start(); err != nil { + logger.Error(err, fmt.Sprintf("failed to start port-forward for pod %s", p.Name)) + return err + } + waitFor(ctx, func() bool { + resp, err := httpGet(fmt.Sprintf("https://localhost:%d%s", port, endpoint), token) + if err != nil { + return false + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + b, err := io.ReadAll(resp.Body) + if err != nil { + return false + } + sc.metricsResponse[p.Name] = string(b) + return true + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return false + } + logger.V(1).Info("failed to get metrics", "pod", p.Name, "response", string(b)) + return false + }) + if err := portForwardCmd.Process.Kill(); err != nil { + return err + } + if _, err := portForwardCmd.Process.Wait(); err != nil { + return err + } + } + + return nil +} + +func CatalogIsUpdatedToVersion(name, version string) error { + ref, err := k8sClient("get", "clustercatalog", fmt.Sprintf("%s-catalog", name), "-o", "jsonpath={.spec.source.image.ref}") + if err != nil { + return err + } + i := strings.LastIndexByte(ref, ':') + if i == -1 { + return fmt.Errorf("failed to find tag in image reference %s", ref) + } + base := ref[:i] + patch := map[string]any{ + "spec": map[string]any{ + "source": map[string]any{ + "image": map[string]any{ + "ref": fmt.Sprintf("%s:%s", base, version), + }, + }, + }, + } + pb, err := json.Marshal(patch) + if err != nil { + return err + } + _, err = k8sClient("patch", "clustercatalog", fmt.Sprintf("%s-catalog", name), "--type", "merge", "-p", string(pb)) + return err +} + +func CatalogServesBundles(ctx context.Context, catalogName string) error { + yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", fmt.Sprintf("%s-catalog-template.yaml", catalogName))) + if err != nil { + return fmt.Errorf("failed to read catalog yaml: %v", err) + } + + _, err = k8scliWithInput(substituteScenarioVars(string(yamlContent), scenarioCtx(ctx)), "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply catalog: %v", err) + } + + return nil +} + +func TagCatalogImage(name, oldTag, newTag string) error { + imageRef := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), fmt.Sprintf("e2e/%s-catalog:%s", name, oldTag)) + return crane.Tag(imageRef, newTag, crane.Insecure) +} + +func PrometheusMetricsAreReturned(ctx context.Context) error { + sc := scenarioCtx(ctx) + for podName, mr := range sc.metricsResponse { + if mr == "" { + return fmt.Errorf("metrics response is empty for pod %s", podName) + } + parser := expfmt.NewTextParser(model.UTF8Validation) + metricsFamilies, err := parser.TextToMetricFamilies(strings.NewReader(mr)) + if err != nil { + 
return fmt.Errorf("failed to parse metrics response for pod %s: %v", podName, err) + } + if len(metricsFamilies) == 0 { + return fmt.Errorf("metrics response does not contain any metrics for pod %s", podName) + } + } + return nil +} + +func OperatorTargetNamespace(ctx context.Context, operator, namespace string) error { + sc := scenarioCtx(ctx) + namespace = substituteScenarioVars(namespace, sc) + raw, err := k8sClient("get", "deployment", "-n", sc.namespace, operator, "-o", "json") + if err != nil { + return err + } + d := &appsv1.Deployment{} + if err := json.Unmarshal([]byte(raw), d); err != nil { + return err + } + + if tns := d.Spec.Template.Annotations["olm.targetNamespaces"]; tns != namespace { + return fmt.Errorf("expected target namespace %s, got %s", namespace, tns) + } + return nil +} + +func MarkTestOperatorNotReady(ctx context.Context, state string) error { + sc := scenarioCtx(ctx) + v, err := k8sClient("get", "deployment", "-n", sc.namespace, "test-operator", "-o", "jsonpath={.spec.selector.matchLabels}") + if err != nil { + return err + } + var labels map[string]string + if err := json.Unmarshal([]byte(v), &labels); err != nil { + return err + } + podNameCmd := []string{"get", "pod", "-n", sc.namespace, "-o", "jsonpath={.items[0].metadata.name}"} + for k, v := range labels { + podNameCmd = append(podNameCmd, fmt.Sprintf("--selector=%s=%s", k, v)) + } + podName, err := k8sClient(podNameCmd...) + if err != nil { + return err + } + var op string + switch state { + case "not ready": + op = "rm" + case "ready": + op = "touch" + default: + return fmt.Errorf("invalid state %s", state) + } + _, err = k8sClient("exec", podName, "-n", sc.namespace, "--", op, "/var/www/ready") + return err +} diff --git a/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml new file mode 100644 index 000000000..c020c7ca5 --- /dev/null +++ b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${TEST_NAMESPACE} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-cluster-admin-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} diff --git a/test/e2e/steps/testdata/extra-catalog-template.yaml b/test/e2e/steps/testdata/extra-catalog-template.yaml new file mode 100644 index 000000000..a43d9b324 --- /dev/null +++ b/test/e2e/steps/testdata/extra-catalog-template.yaml @@ -0,0 +1,11 @@ +apiVersion: olm.operatorframework.io/v1 +kind: ClusterCatalog +metadata: + name: extra-catalog +spec: + priority: 0 + source: + type: Image + image: + pollIntervalMinutes: 1 + ref: ${CATALOG_IMG} diff --git a/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml new file mode 100644 index 000000000..4001f8681 --- /dev/null +++ b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${TEST_NAMESPACE} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: 
${CONTROLLER_NAME}-metrics-reader-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ${CONTROLLER_NAME}-metrics-reader +subjects: + - kind: ServiceAccount + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} diff --git a/test/e2e/steps/testdata/rbac-template.yaml b/test/e2e/steps/testdata/rbac-template.yaml new file mode 100644 index 000000000..d975d7698 --- /dev/null +++ b/test/e2e/steps/testdata/rbac-template.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${TEST_NAMESPACE} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-olm-admin-clusterrole +rules: + - apiGroups: [olm.operatorframework.io] + resources: [clusterextensions, clusterextensions/finalizers] + resourceNames: ["${CLUSTEREXTENSION_NAME}"] + verbs: [update] + # Allow ClusterExtensionRevisions to set blockOwnerDeletion ownerReferences + - apiGroups: [olm.operatorframework.io] + resources: [clusterextensionrevisions, clusterextensionrevisions/finalizers] + verbs: [update, create, list, watch, get, delete, patch] + + - apiGroups: [apiextensions.k8s.io] + resources: [customresourcedefinitions] + verbs: [update, create, list, watch, get, delete, patch] + - apiGroups: [""] + resources: + - configmaps + - secrets + - services + - serviceaccounts + - events + - namespaces + verbs: [update, create, list, watch, get, delete, patch] + - apiGroups: ["apps"] + resources: + - deployments + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: + - clusterroles + - roles + - clusterrolebindings + - rolebindings + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: [create] + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: [create] + + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-install-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-olm-admin-clusterrole +subjects: + - kind: ServiceAccount + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} diff --git a/test/e2e/steps/testdata/test-catalog-template.yaml b/test/e2e/steps/testdata/test-catalog-template.yaml new file mode 100644 index 000000000..7e46872f3 --- /dev/null +++ b/test/e2e/steps/testdata/test-catalog-template.yaml @@ -0,0 +1,11 @@ +apiVersion: olm.operatorframework.io/v1 +kind: ClusterCatalog +metadata: + name: test-catalog +spec: + priority: 0 + source: + type: Image + image: + pollIntervalMinutes: 1 + ref: ${CATALOG_IMG} diff --git a/test/e2e/webhook_support_test.go b/test/e2e/webhook_support_test.go deleted file mode 100644 index 9fd05184a..000000000 --- a/test/e2e/webhook_support_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 
"k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" - . "github.com/operator-framework/operator-controller/test/helpers" -) - -var dynamicClient dynamic.Interface - -func TestWebhookSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, "WebhookProviderCertManager") - t.Log("Test support for bundles with webhooks") - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - if dynamicClient == nil { - var err error - dynamicClient, err = dynamic.NewForConfig(cfg) - require.NoError(t, err) - } - - t.Log("By creating install namespace, and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), &serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the webhook-operator ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By installing the webhook-operator ClusterExtension") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: 
&ocv1.CatalogFilter{ - PackageName: "webhook-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for webhook-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By waiting for webhook-operator deployment to be available") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "webhook-operator-controller-manager"}, deployment)) - available := false - for _, cond := range deployment.Status.Conditions { - if cond.Type == appsv1.DeploymentAvailable { - available = cond.Status == corev1.ConditionTrue - } - } - require.True(ct, available) - }, pollDuration, pollInterval) - - v1Gvr := schema.GroupVersionResource{ - Group: "webhook.operators.coreos.io", - Version: "v1", - Resource: "webhooktests", - } - v1Client := dynamicClient.Resource(v1Gvr).Namespace(namespace.GetName()) - - t.Log("By eventually seeing that invalid CR creation is rejected by the validating webhook") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - obj := getWebhookOperatorResource("invalid-test-cr", namespace.GetName(), false) - _, err := v1Client.Create(t.Context(), obj, metav1.CreateOptions{}) - require.Error(ct, err) - require.Contains(ct, err.Error(), "Invalid value: false: Spec.Valid must be true") - }, pollDuration, pollInterval) - - var ( - res *unstructured.Unstructured - err error - obj = getWebhookOperatorResource("valid-test-cr", namespace.GetName(), true) - ) - - t.Log("By eventually creating a valid CR") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - res, err = v1Client.Create(t.Context(), obj, metav1.CreateOptions{}) - require.NoError(ct, err) - }, pollDuration, pollInterval) - t.Cleanup(func() { - require.NoError(t, v1Client.Delete(context.Background(), obj.GetName(), metav1.DeleteOptions{})) - }) - - require.Equal(t, map[string]interface{}{ - "valid": true, - "mutate": true, - }, res.Object["spec"]) - - t.Log("By checking a valid CR is converted to v2 by the conversion webhook") - v2Gvr := schema.GroupVersionResource{ - Group: "webhook.operators.coreos.io", - Version: "v2", - Resource: "webhooktests", - } - v2Client := dynamicClient.Resource(v2Gvr).Namespace(namespace.GetName()) - - t.Log("By eventually getting the valid CR with a v2 client") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - res, err = v2Client.Get(t.Context(), obj.GetName(), metav1.GetOptions{}) - require.NoError(ct, err) - }, pollDuration, pollInterval) - - 
t.Log("and verifying that the CR is correctly converted") - require.Equal(t, map[string]interface{}{ - "conversion": map[string]interface{}{ - "valid": true, - "mutate": true, - }, - }, res.Object["spec"]) -} - -func getWebhookOperatorResource(name string, namespace string, valid bool) *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "webhook.operators.coreos.io/v1", - "kind": "webhooktests", - "metadata": map[string]interface{}{ - "name": name, - "namespace": namespace, - }, - "spec": map[string]interface{}{ - "valid": valid, - }, - }, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go index c1f3e442d..79d425e87 100644 --- a/vendor/github.com/cenkalti/backoff/v5/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -1,7 +1,7 @@ package backoff import ( - "math/rand" + "math/rand/v2" "time" ) @@ -28,13 +28,7 @@ multiplied by the exponential, that is, between 2 and 6 seconds. Note: MaxInterval caps the RetryInterval and not the randomized interval. -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: +Example: Given the following default arguments, for 9 tries the sequence will be: Request # RetryInterval (seconds) Randomized Interval (seconds) @@ -47,7 +41,6 @@ and assuming we go over the MaxElapsedTime on the 10th try: 7 5.692 [2.846, 8.538] 8 8.538 [4.269, 12.807] 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop Note: Implementation is not thread-safe. */ diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go index e43f47fb8..32a7f9883 100644 --- a/vendor/github.com/cenkalti/backoff/v5/retry.go +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -47,7 +47,7 @@ func WithNotify(n Notify) RetryOption { } } -// WithMaxTries limits the number of retry attempts. +// WithMaxTries limits the number of all attempts. func WithMaxTries(n uint) RetryOption { return func(args *retryOptions) { args.MaxTries = n @@ -97,7 +97,7 @@ func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOpti // Handle permanent errors without retrying. var permanent *PermanentError if errors.As(err, &permanent) { - return res, err + return res, permanent.Unwrap() } // Stop retrying if context is cancelled. 
diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSES b/vendor/github.com/cert-manager/cert-manager/LICENSES index c552b7923..dfd67cbfa 100644 --- a/vendor/github.com/cert-manager/cert-manager/LICENSES +++ b/vendor/github.com/cert-manager/cert-manager/LICENSES @@ -1,200 +1,235 @@ -cel.dev/expr,https://github.com/google/cel-spec/blob/v0.19.1/LICENSE,Apache-2.0 -cloud.google.com/go/auth,https://github.com/googleapis/google-cloud-go/blob/auth/v0.9.4/auth/LICENSE,Apache-2.0 -cloud.google.com/go/auth/oauth2adapt,https://github.com/googleapis/google-cloud-go/blob/auth/oauth2adapt/v0.2.4/auth/oauth2adapt/LICENSE,Apache-2.0 -cloud.google.com/go/compute/metadata,https://github.com/googleapis/google-cloud-go/blob/compute/metadata/v0.5.2/compute/metadata/LICENSE,Apache-2.0 -github.com/Azure/azure-sdk-for-go/sdk/azcore,https://github.com/Azure/azure-sdk-for-go/blob/sdk/azcore/v1.14.0/sdk/azcore/LICENSE.txt,MIT -github.com/Azure/azure-sdk-for-go/sdk/azidentity,https://github.com/Azure/azure-sdk-for-go/blob/sdk/azidentity/v1.7.0/sdk/azidentity/LICENSE.txt,MIT -github.com/Azure/azure-sdk-for-go/sdk/internal,https://github.com/Azure/azure-sdk-for-go/blob/sdk/internal/v1.10.0/sdk/internal/LICENSE.txt,MIT -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns,https://github.com/Azure/azure-sdk-for-go/blob/sdk/resourcemanager/dns/armdns/v1.2.0/sdk/resourcemanager/dns/armdns/LICENSE.txt,MIT -github.com/Azure/go-ntlmssp,https://github.com/Azure/go-ntlmssp/blob/754e69321358/LICENSE,MIT -github.com/AzureAD/microsoft-authentication-library-for-go/apps,https://github.com/AzureAD/microsoft-authentication-library-for-go/blob/v1.2.2/LICENSE,MIT -github.com/Khan/genqlient/graphql,https://github.com/Khan/genqlient/blob/v0.7.0/LICENSE,MIT -github.com/NYTimes/gziphandler,https://github.com/NYTimes/gziphandler/blob/v1.1.1/LICENSE,Apache-2.0 -github.com/Venafi/vcert/v5,https://github.com/Venafi/vcert/blob/v5.8.0/LICENSE,Apache-2.0 -github.com/akamai/AkamaiOPEN-edgegrid-golang,https://github.com/akamai/AkamaiOPEN-edgegrid-golang/blob/v1.2.2/LICENSE,Apache-2.0 -github.com/antlr4-go/antlr/v4,https://github.com/antlr4-go/antlr/blob/v4.13.1/LICENSE,BSD-3-Clause -github.com/asaskevich/govalidator,https://github.com/asaskevich/govalidator/blob/a9d515a09cc2/LICENSE,MIT -github.com/aws/aws-sdk-go-v2,https://github.com/aws/aws-sdk-go-v2/blob/v1.31.0/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/config,https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.36/config/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/credentials,https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.34/credentials/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/feature/ec2/imds,https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.14/feature/ec2/imds/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/configsources,https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.18/internal/configsources/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2,https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.18/internal/endpoints/v2/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/ini,https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/sync/singleflight,https://github.com/aws/aws-sdk-go-v2/blob/v1.31.0/internal/sync/singleflight/LICENSE,BSD-3-Clause 
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding,https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.5/service/internal/accept-encoding/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url,https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.20/service/internal/presigned-url/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/route53,https://github.com/aws/aws-sdk-go-v2/blob/service/route53/v1.44.0/service/route53/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/sso,https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.23.0/service/sso/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/ssooidc,https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.27.0/service/ssooidc/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/sts,https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.31.0/service/sts/LICENSE.txt,Apache-2.0 -github.com/aws/smithy-go,https://github.com/aws/smithy-go/blob/v1.21.0/LICENSE,Apache-2.0 -github.com/aws/smithy-go/internal/sync/singleflight,https://github.com/aws/smithy-go/blob/v1.21.0/internal/sync/singleflight/LICENSE,BSD-3-Clause -github.com/beorn7/perks/quantile,https://github.com/beorn7/perks/blob/v1.0.1/LICENSE,MIT -github.com/blang/semver/v4,https://github.com/blang/semver/blob/v4.0.0/v4/LICENSE,MIT -github.com/cenkalti/backoff/v4,https://github.com/cenkalti/backoff/blob/v4.3.0/LICENSE,MIT -github.com/cert-manager/cert-manager,https://github.com/cert-manager/cert-manager/blob/HEAD/LICENSE,Apache-2.0 -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/azuredns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/azuredns/LICENSE,MIT -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/clouddns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/clouddns/LICENSE,MIT -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/cloudflare,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/cloudflare/LICENSE,MIT -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/route53,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/route53/LICENSE,MIT -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/util,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/util/LICENSE,MIT -github.com/cert-manager/cert-manager/third_party/forked/acme,https://github.com/cert-manager/cert-manager/blob/HEAD/third_party/forked/acme/LICENSE,BSD-3-Clause -github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt,MIT -github.com/coreos/go-semver/semver,https://github.com/coreos/go-semver/blob/v0.3.1/LICENSE,Apache-2.0 -github.com/coreos/go-systemd/v22,https://github.com/coreos/go-systemd/blob/v22.5.0/LICENSE,Apache-2.0 -github.com/cpu/goacmedns,https://github.com/cpu/goacmedns/blob/v0.1.1/LICENSE,MIT -github.com/davecgh/go-spew/spew,https://github.com/davecgh/go-spew/blob/d8f796af33cc/LICENSE,ISC -github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.125.0/LICENSE.txt,MIT -github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.125.0/LICENSE.txt,BSD-3-Clause -github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.12.1/LICENSE,MIT -github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.9.0/v5/LICENSE,BSD-3-Clause -github.com/felixge/httpsnoop,https://github.com/felixge/httpsnoop/blob/v1.0.4/LICENSE.txt,MIT 
-github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.8.0/LICENSE,BSD-3-Clause -github.com/fxamacker/cbor/v2,https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE,MIT -github.com/go-asn1-ber/asn1-ber,https://github.com/go-asn1-ber/asn1-ber/blob/v1.5.6/LICENSE,MIT -github.com/go-http-utils/headers,https://github.com/go-http-utils/headers/blob/fed159eddc2a/LICENSE,MIT -github.com/go-jose/go-jose/v4,https://github.com/go-jose/go-jose/blob/v4.0.5/LICENSE,Apache-2.0 -github.com/go-jose/go-jose/v4/json,https://github.com/go-jose/go-jose/blob/v4.0.5/json/LICENSE,BSD-3-Clause -github.com/go-ldap/ldap/v3,https://github.com/go-ldap/ldap/blob/v3.4.8/v3/LICENSE,MIT -github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.4.2/LICENSE,Apache-2.0 -github.com/go-logr/stdr,https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE,Apache-2.0 -github.com/go-logr/zapr,https://github.com/go-logr/zapr/blob/v1.3.0/LICENSE,Apache-2.0 -github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.21.0/LICENSE,Apache-2.0 -github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.21.0/LICENSE,Apache-2.0 -github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.23.0/LICENSE,Apache-2.0 -github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause -github.com/golang-jwt/jwt/v5,https://github.com/golang-jwt/jwt/blob/v5.2.2/LICENSE,MIT -github.com/golang/groupcache/lru,https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE,Apache-2.0 -github.com/golang/protobuf/proto,https://github.com/golang/protobuf/blob/v1.5.4/LICENSE,BSD-3-Clause -github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.4/LICENSE,BSD-3-Clause -github.com/google/btree,https://github.com/google/btree/blob/v1.1.3/LICENSE,Apache-2.0 -github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.22.1/LICENSE,Apache-2.0 -github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.22.1/LICENSE,BSD-3-Clause -github.com/google/gnostic-models,https://github.com/google/gnostic-models/blob/v0.6.9/LICENSE,Apache-2.0 -github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.6.0/LICENSE,BSD-3-Clause -github.com/google/go-querystring/query,https://github.com/google/go-querystring/blob/v1.1.0/LICENSE,BSD-3-Clause -github.com/google/gofuzz,https://github.com/google/gofuzz/blob/v1.2.0/LICENSE,Apache-2.0 -github.com/google/s2a-go,https://github.com/google/s2a-go/blob/v0.1.8/LICENSE.md,Apache-2.0 -github.com/google/uuid,https://github.com/google/uuid/blob/v1.6.0/LICENSE,BSD-3-Clause -github.com/googleapis/enterprise-certificate-proxy/client,https://github.com/googleapis/enterprise-certificate-proxy/blob/v0.3.4/LICENSE,Apache-2.0 -github.com/googleapis/gax-go/v2,https://github.com/googleapis/gax-go/blob/v2.13.0/v2/LICENSE,BSD-3-Clause -github.com/gorilla/websocket,https://github.com/gorilla/websocket/blob/v1.5.3/LICENSE,BSD-2-Clause -github.com/grpc-ecosystem/go-grpc-prometheus,https://github.com/grpc-ecosystem/go-grpc-prometheus/blob/v1.2.0/LICENSE,Apache-2.0 -github.com/grpc-ecosystem/grpc-gateway/v2,https://github.com/grpc-ecosystem/grpc-gateway/blob/v2.25.1/LICENSE,BSD-3-Clause -github.com/hashicorp/errwrap,https://github.com/hashicorp/errwrap/blob/v1.1.0/LICENSE,MPL-2.0 -github.com/hashicorp/go-cleanhttp,https://github.com/hashicorp/go-cleanhttp/blob/v0.5.2/LICENSE,MPL-2.0 -github.com/hashicorp/go-multierror,https://github.com/hashicorp/go-multierror/blob/v1.1.1/LICENSE,MPL-2.0 
-github.com/hashicorp/go-retryablehttp,https://github.com/hashicorp/go-retryablehttp/blob/v0.7.7/LICENSE,MPL-2.0 -github.com/hashicorp/go-rootcerts,https://github.com/hashicorp/go-rootcerts/blob/v1.0.2/LICENSE,MPL-2.0 -github.com/hashicorp/go-secure-stdlib/parseutil,https://github.com/hashicorp/go-secure-stdlib/blob/parseutil/v0.1.8/parseutil/LICENSE,MPL-2.0 -github.com/hashicorp/go-secure-stdlib/strutil,https://github.com/hashicorp/go-secure-stdlib/blob/strutil/v0.1.2/strutil/LICENSE,MPL-2.0 -github.com/hashicorp/go-sockaddr,https://github.com/hashicorp/go-sockaddr/blob/v1.0.6/LICENSE,MPL-2.0 -github.com/hashicorp/hcl,https://github.com/hashicorp/hcl/blob/v1.0.1-vault-5/LICENSE,MPL-2.0 -github.com/hashicorp/vault/api,https://github.com/hashicorp/vault/blob/api/v1.15.0/api/LICENSE,MPL-2.0 -github.com/hashicorp/vault/sdk/helper,https://github.com/hashicorp/vault/blob/sdk/v0.14.0/sdk/LICENSE,MPL-2.0 -github.com/jmespath/go-jmespath,https://github.com/jmespath/go-jmespath/blob/b0104c826a24/LICENSE,Apache-2.0 -github.com/josharian/intern,https://github.com/josharian/intern/blob/v1.0.0/license.md,MIT -github.com/json-iterator/go,https://github.com/json-iterator/go/blob/v1.1.12/LICENSE,MIT -github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.11/LICENSE,MIT -github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.11/LICENSE,Apache-2.0 -github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.11/LICENSE,BSD-3-Clause -github.com/klauspost/compress/internal/snapref,https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE,BSD-3-Clause -github.com/klauspost/compress/zstd/internal/xxhash,https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt,MIT -github.com/kr/pretty,https://github.com/kr/pretty/blob/v0.3.1/License,MIT -github.com/kr/text,https://github.com/kr/text/blob/v0.2.0/License,MIT -github.com/kylelemons/godebug,https://github.com/kylelemons/godebug/blob/v1.1.0/LICENSE,Apache-2.0 -github.com/mailru/easyjson,https://github.com/mailru/easyjson/blob/v0.9.0/LICENSE,MIT -github.com/miekg/dns,https://github.com/miekg/dns/blob/v1.1.62/LICENSE,BSD-3-Clause -github.com/mitchellh/go-homedir,https://github.com/mitchellh/go-homedir/blob/v1.1.0/LICENSE,MIT -github.com/mitchellh/mapstructure,https://github.com/mitchellh/mapstructure/blob/v1.5.0/LICENSE,MIT -github.com/modern-go/concurrent,https://github.com/modern-go/concurrent/blob/bacd9c7ef1dd/LICENSE,Apache-2.0 -github.com/modern-go/reflect2,https://github.com/modern-go/reflect2/blob/v1.0.2/LICENSE,Apache-2.0 -github.com/munnerz/goautoneg,https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE,BSD-3-Clause -github.com/patrickmn/go-cache,https://github.com/patrickmn/go-cache/blob/v2.1.0/LICENSE,MIT -github.com/pavlo-v-chernykh/keystore-go/v4,https://github.com/pavlo-v-chernykh/keystore-go/blob/v4.5.0/LICENSE,MIT -github.com/pierrec/lz4,https://github.com/pierrec/lz4/blob/v2.6.1/LICENSE,BSD-3-Clause -github.com/pkg/browser,https://github.com/pkg/browser/blob/5ac0b6a4141c/LICENSE,BSD-2-Clause -github.com/pkg/errors,https://github.com/pkg/errors/blob/v0.9.1/LICENSE,BSD-2-Clause -github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,https://github.com/prometheus/client_golang/blob/v1.20.5/internal/github.com/golang/gddo/LICENSE,BSD-3-Clause -github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.20.5/LICENSE,Apache-2.0 
-github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.6.1/LICENSE,Apache-2.0 -github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.61.0/LICENSE,Apache-2.0 -github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.15.1/LICENSE,Apache-2.0 -github.com/rogpeppe/go-internal/fmtsort,https://github.com/rogpeppe/go-internal/blob/v1.13.1/LICENSE,BSD-3-Clause -github.com/ryanuber/go-glob,https://github.com/ryanuber/go-glob/blob/v1.0.0/LICENSE,MIT -github.com/sirupsen/logrus,https://github.com/sirupsen/logrus/blob/v1.9.3/LICENSE,MIT -github.com/sosodev/duration,https://github.com/sosodev/duration/blob/v1.3.1/LICENSE,MIT -github.com/spf13/cobra,https://github.com/spf13/cobra/blob/v1.8.1/LICENSE.txt,Apache-2.0 -github.com/spf13/pflag,https://github.com/spf13/pflag/blob/v1.0.5/LICENSE,BSD-3-Clause -github.com/stoewer/go-strcase,https://github.com/stoewer/go-strcase/blob/v1.3.0/LICENSE,MIT -github.com/vektah/gqlparser/v2,https://github.com/vektah/gqlparser/blob/v2.5.15/LICENSE,MIT -github.com/x448/float16,https://github.com/x448/float16/blob/v0.8.4/LICENSE,MIT -github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/3c2c7870ae76/LICENSE,MIT -go.etcd.io/etcd/api/v3,https://github.com/etcd-io/etcd/blob/api/v3.5.17/api/LICENSE,Apache-2.0 -go.etcd.io/etcd/client/pkg/v3,https://github.com/etcd-io/etcd/blob/client/pkg/v3.5.17/client/pkg/LICENSE,Apache-2.0 -go.etcd.io/etcd/client/v3,https://github.com/etcd-io/etcd/blob/client/v3.5.17/client/v3/LICENSE,Apache-2.0 -go.opencensus.io,https://github.com/census-instrumentation/opencensus-go/blob/v0.24.0/LICENSE,Apache-2.0 -go.opentelemetry.io/auto/sdk,https://github.com/open-telemetry/opentelemetry-go-instrumentation/blob/sdk/v1.1.0/sdk/LICENSE,Apache-2.0 -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/google.golang.org/grpc/otelgrpc/v0.58.0/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE,Apache-2.0 -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/net/http/otelhttp/v0.58.0/instrumentation/net/http/otelhttp/LICENSE,Apache-2.0 -go.opentelemetry.io/otel,https://github.com/open-telemetry/opentelemetry-go/blob/v1.33.0/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/exporters/otlp/otlptrace,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/v1.33.0/exporters/otlp/otlptrace/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/otlptracegrpc/v1.33.0/exporters/otlp/otlptrace/otlptracegrpc/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/metric,https://github.com/open-telemetry/opentelemetry-go/blob/metric/v1.33.0/metric/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/sdk,https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.33.0/sdk/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-go/blob/trace/v1.33.0/trace/LICENSE,Apache-2.0 -go.opentelemetry.io/proto/otlp,https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.4.0/otlp/LICENSE,Apache-2.0 -go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt,MIT -go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.27.0/LICENSE,MIT -golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE,BSD-3-Clause 
-golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/b2144cdd:LICENSE,BSD-3-Clause -golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.38.0:LICENSE,BSD-3-Clause -golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.28.0:LICENSE,BSD-3-Clause -golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE,BSD-3-Clause -golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE,BSD-3-Clause -golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE,BSD-3-Clause -golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE,BSD-3-Clause -golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.8.0:LICENSE,BSD-3-Clause -gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.4.0/v2/LICENSE,Apache-2.0 -google.golang.org/api,https://github.com/googleapis/google-api-go-client/blob/v0.198.0/LICENSE,BSD-3-Clause -google.golang.org/api/internal/third_party/uritemplates,https://github.com/googleapis/google-api-go-client/blob/v0.198.0/internal/third_party/uritemplates/LICENSE,BSD-3-Clause -google.golang.org/genproto/googleapis/api,https://github.com/googleapis/go-genproto/blob/6b3ec007d9bb/googleapis/api/LICENSE,Apache-2.0 -google.golang.org/genproto/googleapis/rpc,https://github.com/googleapis/go-genproto/blob/6b3ec007d9bb/googleapis/rpc/LICENSE,Apache-2.0 -google.golang.org/grpc,https://github.com/grpc/grpc-go/blob/v1.69.2/LICENSE,Apache-2.0 -google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.36.0/LICENSE,BSD-3-Clause -gopkg.in/evanphx/json-patch.v4,https://github.com/evanphx/json-patch/blob/v4.12.0/LICENSE,BSD-3-Clause -gopkg.in/inf.v0,https://github.com/go-inf/inf/blob/v0.9.1/LICENSE,BSD-3-Clause -gopkg.in/ini.v1,https://github.com/go-ini/ini/blob/v1.67.0/LICENSE,Apache-2.0 -gopkg.in/natefinch/lumberjack.v2,https://github.com/natefinch/lumberjack/blob/v2.2.1/LICENSE,MIT -gopkg.in/yaml.v2,https://github.com/go-yaml/yaml/blob/v2.4.0/LICENSE,Apache-2.0 -gopkg.in/yaml.v3,https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE,MIT -k8s.io/api,https://github.com/kubernetes/api/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/apiextensions-apiserver/pkg,https://github.com/kubernetes/apiextensions-apiserver/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/apimachinery/pkg,https://github.com/kubernetes/apimachinery/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/apimachinery/third_party/forked/golang,https://github.com/kubernetes/apimachinery/blob/v0.32.0/third_party/forked/golang/LICENSE,BSD-3-Clause -k8s.io/apiserver,https://github.com/kubernetes/apiserver/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/client-go,https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/component-base,https://github.com/kubernetes/component-base/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/klog/v2,https://github.com/kubernetes/klog/blob/v2.130.1/LICENSE,Apache-2.0 -k8s.io/kms,https://github.com/kubernetes/kms/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/kube-aggregator/pkg/apis/apiregistration,https://github.com/kubernetes/kube-aggregator/blob/v0.31.1/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/internal/third_party/go-json-experiment/json/LICENSE,BSD-3-Clause 
-k8s.io/kube-openapi/pkg/internal/third_party/govalidator,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/internal/third_party/govalidator/LICENSE,MIT -k8s.io/kube-openapi/pkg/validation/errors,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/validation/errors/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg/validation/spec,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/validation/spec/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg/validation/strfmt,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/validation/strfmt/LICENSE,Apache-2.0 -k8s.io/utils,https://github.com/kubernetes/utils/blob/24370beab758/LICENSE,Apache-2.0 -k8s.io/utils/internal/third_party/forked/golang,https://github.com/kubernetes/utils/blob/24370beab758/internal/third_party/forked/golang/LICENSE,BSD-3-Clause -sigs.k8s.io/apiserver-network-proxy/konnectivity-client,https://github.com/kubernetes-sigs/apiserver-network-proxy/blob/konnectivity-client/v0.31.1/konnectivity-client/LICENSE,Apache-2.0 -sigs.k8s.io/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/v0.19.0/LICENSE,Apache-2.0 -sigs.k8s.io/gateway-api,https://github.com/kubernetes-sigs/gateway-api/blob/v1.1.0/LICENSE,Apache-2.0 -sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/cfa47c3a1cc8/LICENSE,Apache-2.0 -sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/cfa47c3a1cc8/LICENSE,BSD-3-Clause -sigs.k8s.io/structured-merge-diff/v4,https://github.com/kubernetes-sigs/structured-merge-diff/blob/v4.5.0/LICENSE,Apache-2.0 -sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,MIT -sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,Apache-2.0 -sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,BSD-3-Clause -sigs.k8s.io/yaml/goyaml.v2,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE,Apache-2.0 -software.sslmate.com/src/go-pkcs12,https://github.com/SSLMate/go-pkcs12/blob/v0.5.0/LICENSE,BSD-3-Clause +This LICENSES file is generated by the `licenses` module in makefile-modules[0]. + +The licenses below the "---" are determined by the go-licenses tool[1]. + +The aim of this file is to collect the licenses of all dependencies, and provide +a single source of truth for licenses used by this project. + +## For Developers + +If CI reports that this file is out of date, you should be careful to check that the +new licenses are acceptable for this project before running `make generate-go-licenses` +to update this file. + +Acceptable licenses are those allowlisted by the CNCF[2]. + +You MUST NOT add any new dependencies whose licenses are not allowlisted by the CNCF, +or which do not have an explicit license exception[3]. + +## For Users + +If this file was included in a release artifact, it is a snapshot of the licenses of all dependencies at the time of the release. + +You can retrieve the actual license text by following these steps: + +1. Find the dependency name in this file +2. Go to the source code repository of this project, and go to the tag corresponding to this release. +3. Find the exact version of the dependency in the `go.mod` file +4. Search for the dependency at the correct version in the [Go package index](https://pkg.go.dev/). 
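+As a hypothetical worked example of the steps above (illustrative, not part of the upstream file): to verify gopkg.in/yaml.v3, which is listed below as MIT, check out the release tag of this repository, look up the pinned version of gopkg.in/yaml.v3 in go.mod, and open that module at the same version on https://pkg.go.dev/gopkg.in/yaml.v3, where the package page links to the detected license text.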
+ +## Links + +[0]: https://github.com/cert-manager/makefile-modules/ +[1]: https://github.com/google/go-licenses +[2]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/policies-guidance/allowed-third-party-license-policy.md#cncf-allowlist-license-policy +[3]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/license-exceptions/README.md + +--- + +cel.dev/expr,Apache-2.0 +cloud.google.com/go/auth,Apache-2.0 +cloud.google.com/go/auth/oauth2adapt,Apache-2.0 +cloud.google.com/go/compute/metadata,Apache-2.0 +github.com/Azure/azure-sdk-for-go/sdk/azcore,MIT +github.com/Azure/azure-sdk-for-go/sdk/azidentity,MIT +github.com/Azure/azure-sdk-for-go/sdk/internal,MIT +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns,MIT +github.com/Azure/go-ntlmssp,MIT +github.com/AzureAD/microsoft-authentication-library-for-go/apps,MIT +github.com/Khan/genqlient/graphql,MIT +github.com/NYTimes/gziphandler,Apache-2.0 +github.com/Venafi/vcert/v5,Apache-2.0 +github.com/akamai/AkamaiOPEN-edgegrid-golang/v12/pkg,Apache-2.0 +github.com/antlr4-go/antlr/v4,BSD-3-Clause +github.com/aws/aws-sdk-go-v2,Apache-2.0 +github.com/aws/aws-sdk-go-v2/config,Apache-2.0 +github.com/aws/aws-sdk-go-v2/credentials,Apache-2.0 +github.com/aws/aws-sdk-go-v2/feature/ec2/imds,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/configsources,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/ini,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/sync/singleflight,BSD-3-Clause +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/route53,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/sso,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/ssooidc,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/sts,Apache-2.0 +github.com/aws/smithy-go,Apache-2.0 +github.com/aws/smithy-go/internal/sync/singleflight,BSD-3-Clause +github.com/benbjohnson/clock,MIT +github.com/beorn7/perks/quantile,MIT +github.com/blang/semver/v4,MIT +github.com/cenkalti/backoff/v4,MIT +github.com/cenkalti/backoff/v5,MIT +github.com/cert-manager/cert-manager,Apache-2.0 +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/azuredns,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/clouddns,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/cloudflare,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/route53,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/util,MIT +github.com/cert-manager/cert-manager/third_party/forked/acme,BSD-3-Clause +github.com/cespare/xxhash/v2,MIT +github.com/coreos/go-semver/semver,Apache-2.0 +github.com/coreos/go-systemd/v22,Apache-2.0 +github.com/davecgh/go-spew/spew,ISC +github.com/digitalocean/godo,MIT +github.com/digitalocean/godo,BSD-3-Clause +github.com/emicklei/go-restful/v3,MIT +github.com/evanphx/json-patch/v5,BSD-3-Clause +github.com/felixge/httpsnoop,MIT +github.com/fsnotify/fsnotify,BSD-3-Clause +github.com/fxamacker/cbor/v2,MIT +github.com/go-asn1-ber/asn1-ber,MIT +github.com/go-http-utils/headers,MIT +github.com/go-jose/go-jose/v4,Apache-2.0 +github.com/go-jose/go-jose/v4/json,BSD-3-Clause +github.com/go-ldap/ldap/v3,MIT +github.com/go-logr/logr,Apache-2.0 +github.com/go-logr/stdr,Apache-2.0 +github.com/go-logr/zapr,Apache-2.0 +github.com/go-openapi/jsonpointer,Apache-2.0 +github.com/go-openapi/jsonreference,Apache-2.0 
+github.com/go-openapi/swag,Apache-2.0 +github.com/go-openapi/swag/jsonname,Apache-2.0 +github.com/go-ozzo/ozzo-validation/v4,MIT +github.com/gogo/protobuf,BSD-3-Clause +github.com/golang-jwt/jwt/v5,MIT +github.com/golang/protobuf/proto,BSD-3-Clause +github.com/golang/snappy,BSD-3-Clause +github.com/google/btree,Apache-2.0 +github.com/google/cel-go,Apache-2.0 +github.com/google/cel-go,BSD-3-Clause +github.com/google/certificate-transparency-go,Apache-2.0 +github.com/google/gnostic-models,Apache-2.0 +github.com/google/go-cmp/cmp,BSD-3-Clause +github.com/google/go-querystring/query,BSD-3-Clause +github.com/google/s2a-go,Apache-2.0 +github.com/google/uuid,BSD-3-Clause +github.com/googleapis/enterprise-certificate-proxy/client,Apache-2.0 +github.com/googleapis/gax-go/v2,BSD-3-Clause +github.com/gorilla/websocket,BSD-2-Clause +github.com/grpc-ecosystem/go-grpc-prometheus,Apache-2.0 +github.com/grpc-ecosystem/grpc-gateway/v2,BSD-3-Clause +github.com/hashicorp/errwrap,MPL-2.0 +github.com/hashicorp/go-cleanhttp,MPL-2.0 +github.com/hashicorp/go-hmac-drbg/hmacdrbg,MIT +github.com/hashicorp/go-multierror,MPL-2.0 +github.com/hashicorp/go-retryablehttp,MPL-2.0 +github.com/hashicorp/go-rootcerts,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/cryptoutil,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/parseutil,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/strutil,MPL-2.0 +github.com/hashicorp/go-sockaddr,MPL-2.0 +github.com/hashicorp/hcl,MPL-2.0 +github.com/hashicorp/vault/api,MPL-2.0 +github.com/hashicorp/vault/sdk/helper,MPL-2.0 +github.com/josharian/intern,MIT +github.com/json-iterator/go,MIT +github.com/kylelemons/godebug,Apache-2.0 +github.com/mailru/easyjson,MIT +github.com/miekg/dns,BSD-3-Clause +github.com/mitchellh/go-homedir,MIT +github.com/mitchellh/mapstructure,MIT +github.com/modern-go/concurrent,Apache-2.0 +github.com/modern-go/reflect2,Apache-2.0 +github.com/munnerz/goautoneg,BSD-3-Clause +github.com/nrdcg/goacmedns,MIT +github.com/pavlo-v-chernykh/keystore-go/v4,MIT +github.com/pierrec/lz4,BSD-3-Clause +github.com/pkg/browser,BSD-2-Clause +github.com/pmezard/go-difflib/difflib,BSD-3-Clause +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,BSD-3-Clause +github.com/prometheus/client_golang/prometheus,Apache-2.0 +github.com/prometheus/client_model/go,Apache-2.0 +github.com/prometheus/common,Apache-2.0 +github.com/prometheus/procfs,Apache-2.0 +github.com/ryanuber/go-glob,MIT +github.com/sosodev/duration,MIT +github.com/spf13/cobra,Apache-2.0 +github.com/spf13/pflag,BSD-3-Clause +github.com/stoewer/go-strcase,MIT +github.com/stretchr/objx,MIT +github.com/stretchr/testify,MIT +github.com/vektah/gqlparser/v2,MIT +github.com/x448/float16,MIT +github.com/youmark/pkcs8,MIT +go.etcd.io/etcd/api/v3,Apache-2.0 +go.etcd.io/etcd/client/pkg/v3,Apache-2.0 +go.etcd.io/etcd/client/v3,Apache-2.0 +go.opentelemetry.io/auto/sdk,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,Apache-2.0 +go.opentelemetry.io/otel,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,Apache-2.0 +go.opentelemetry.io/otel/metric,Apache-2.0 +go.opentelemetry.io/otel/sdk,Apache-2.0 +go.opentelemetry.io/otel/trace,Apache-2.0 +go.opentelemetry.io/proto/otlp,Apache-2.0 +go.uber.org/multierr,MIT +go.uber.org/ratelimit,MIT +go.uber.org/zap,MIT +go.yaml.in/yaml/v2,Apache-2.0 +go.yaml.in/yaml/v3,MIT 
+golang.org/x/crypto,BSD-3-Clause +golang.org/x/exp/slices,BSD-3-Clause +golang.org/x/net,BSD-3-Clause +golang.org/x/oauth2,BSD-3-Clause +golang.org/x/sync,BSD-3-Clause +golang.org/x/sys,BSD-3-Clause +golang.org/x/term,BSD-3-Clause +golang.org/x/text,BSD-3-Clause +golang.org/x/time/rate,BSD-3-Clause +gomodules.xyz/jsonpatch/v2,Apache-2.0 +google.golang.org/api,BSD-3-Clause +google.golang.org/api/internal/third_party/uritemplates,BSD-3-Clause +google.golang.org/genproto/googleapis/api,Apache-2.0 +google.golang.org/genproto/googleapis/rpc,Apache-2.0 +google.golang.org/grpc,Apache-2.0 +google.golang.org/protobuf,BSD-3-Clause +gopkg.in/evanphx/json-patch.v4,BSD-3-Clause +gopkg.in/inf.v0,BSD-3-Clause +gopkg.in/ini.v1,Apache-2.0 +gopkg.in/natefinch/lumberjack.v2,MIT +gopkg.in/yaml.v2,Apache-2.0 +gopkg.in/yaml.v3,MIT +k8s.io/api,Apache-2.0 +k8s.io/apiextensions-apiserver/pkg,Apache-2.0 +k8s.io/apimachinery/pkg,Apache-2.0 +k8s.io/apimachinery/third_party/forked/golang,BSD-3-Clause +k8s.io/apiserver,Apache-2.0 +k8s.io/client-go,Apache-2.0 +k8s.io/component-base,Apache-2.0 +k8s.io/klog/v2,Apache-2.0 +k8s.io/kms,Apache-2.0 +k8s.io/kube-aggregator/pkg/apis/apiregistration,Apache-2.0 +k8s.io/kube-openapi/pkg,Apache-2.0 +k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,BSD-3-Clause +k8s.io/kube-openapi/pkg/internal/third_party/govalidator,MIT +k8s.io/kube-openapi/pkg/validation/errors,Apache-2.0 +k8s.io/kube-openapi/pkg/validation/spec,Apache-2.0 +k8s.io/kube-openapi/pkg/validation/strfmt,Apache-2.0 +k8s.io/utils,Apache-2.0 +k8s.io/utils/internal/third_party/forked/golang,BSD-3-Clause +sigs.k8s.io/apiserver-network-proxy/konnectivity-client,Apache-2.0 +sigs.k8s.io/controller-runtime,Apache-2.0 +sigs.k8s.io/gateway-api,Apache-2.0 +sigs.k8s.io/json,Apache-2.0 +sigs.k8s.io/json,BSD-3-Clause +sigs.k8s.io/randfill,Apache-2.0 +sigs.k8s.io/structured-merge-diff/v6,Apache-2.0 +sigs.k8s.io/yaml,MIT +sigs.k8s.io/yaml,Apache-2.0 +sigs.k8s.io/yaml,BSD-3-Clause +software.sslmate.com/src/go-pkcs12,BSD-3-Clause diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go index 92b6583d6..5ba5e8f1c 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go @@ -16,5 +16,6 @@ limitations under the License. // Package v1 is the v1 version of the API. 
// +k8s:deepcopy-gen=package,register +// +k8s:openapi-gen=true // +groupName=acme.cert-manager.io package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go index 34bae15b8..dc3bb1b37 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go @@ -25,15 +25,14 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion - -// Challenge is a type to represent a Challenge request with an ACME server -// +k8s:openapi-gen=true // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state" // +kubebuilder:printcolumn:name="Domain",type="string",JSONPath=".spec.dnsName" // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,categories={cert-manager,cert-manager-acme} // +kubebuilder:subresource:status -// +kubebuilder:resource:path=challenges + +// Challenge is a type to represent a Challenge request with an ACME server type Challenge struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` @@ -98,7 +97,7 @@ type ChallengeSpec struct { // If the Issuer does not exist, processing will be retried. // If the Issuer is not an 'ACME' Issuer, an error will be returned and the // Challenge will be marked as failed. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` } // The type of ACME challenge. Only HTTP-01 and DNS-01 are supported. diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go index 54cb4b97e..009b1abe8 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go @@ -96,6 +96,7 @@ type ACMEIssuer struct { // from an ACME server. // For more information, see: https://cert-manager.io/docs/configuration/acme/ // +optional + // +listType=atomic Solvers []ACMEChallengeSolver `json:"solvers,omitempty"` // Enables or disables generating a new ACME account key. @@ -196,6 +197,7 @@ type CertificateDNSNameSelector struct { // If neither has more matches, the solver defined earlier in the list // will be selected. // +optional + // +listType=atomic DNSNames []string `json:"dnsNames,omitempty"` // List of DNSZones that this solver will be used to solve. @@ -208,6 +210,7 @@ type CertificateDNSNameSelector struct { // If neither has more matches, the solver defined earlier in the list // will be selected. // +optional + // +listType=atomic DNSZones []string `json:"dnsZones,omitempty"` } @@ -290,6 +293,8 @@ type ACMEChallengeSolverHTTP01GatewayHTTPRoute struct { // cert-manager needs to know which parentRefs should be used when creating // the HTTPRoute. Usually, the parentRef references a Gateway. 
See: // https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways + // +optional + // +listType=atomic ParentRefs []gwapi.ParentReference `json:"parentRefs,omitempty"` // Optional pod template used to configure the ACME challenge solver pods @@ -336,6 +341,7 @@ type ACMEChallengeSolverHTTP01IngressPodSpec struct { // If specified, the pod's tolerations. // +optional + // +listType=atomic Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // If specified, the pod's priorityClassName. @@ -348,11 +354,24 @@ type ACMEChallengeSolverHTTP01IngressPodSpec struct { // If specified, the pod's imagePullSecrets // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchMergeKey:"name" patchStrategy:"merge"` // If specified, the pod's security context // +optional SecurityContext *ACMEChallengeSolverHTTP01IngressPodSecurityContext `json:"securityContext,omitempty"` + + // If specified, the pod's resource requirements. + // These values override the global resource configuration flags. + // Note that when specifying only resource limits, ensure they are greater than or equal + // to the corresponding global resource requests configured via controller flags + // (--acme-http01-solver-resource-request-cpu, --acme-http01-solver-resource-request-memory). + // Kubernetes will reject pod creation if limits are lower than requests, causing challenge failures. + // +optional + Resources *ACMEChallengeSolverHTTP01IngressPodResources `json:"resources,omitempty"` } type ACMEChallengeSolverHTTP01IngressTemplate struct { @@ -464,6 +483,7 @@ type ACMEChallengeSolverHTTP01IngressPodSecurityContext struct { // even if they are not included in this list. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume @@ -481,6 +501,7 @@ type ACMEChallengeSolverHTTP01IngressPodSecurityContext struct { // sysctls (by the container runtime) might fail to launch. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic Sysctls []corev1.Sysctl `json:"sysctls,omitempty"` // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume // before being exposed inside Pod. This field will only apply to @@ -497,6 +518,21 @@ type ACMEChallengeSolverHTTP01IngressPodSecurityContext struct { SeccompProfile *corev1.SeccompProfile `json:"seccompProfile,omitempty"` } +// ACMEChallengeSolverHTTP01IngressPodResources defines resource requirements for ACME HTTP01 solver pods. +// To keep the API surface minimal, this trims the 'corev1.ResourceRequirements' type down to only the Requests and Limits fields. +type ACMEChallengeSolverHTTP01IngressPodResources struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Limits corev1.ResourceList `json:"limits,omitempty"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to the global values configured via controller flags.
Requests cannot exceed Limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Requests corev1.ResourceList `json:"requests,omitempty"` +} + // CNAMEStrategy configures how the DNS01 provider should handle CNAME records // when found in DNS zones. // By default, the None strategy will be applied (i.e. do not follow CNAMEs). @@ -658,6 +694,7 @@ type ServiceAccountRef struct { // and name is always included. // If unset, the audience defaults to `sts.amazonaws.com`. // +optional + // +listType=atomic TokenAudiences []string `json:"audiences,omitempty"` } @@ -764,8 +801,22 @@ type ACMEIssuerDNS01ProviderRFC2136 struct { // ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``. // +optional TSIGAlgorithm string `json:"tsigAlgorithm,omitempty"` + + // Protocol to use for dynamic DNS update queries. Valid values (case-sensitive) are ``TCP`` and ``UDP``; the default is ``UDP``. + // +optional + Protocol RFC2136UpdateProtocol `json:"protocol,omitempty"` +} + +// +kubebuilder:validation:Enum=TCP;UDP +type RFC2136UpdateProtocol string + +const ( + // RFC2136UpdateProtocolTCP sends dynamic DNS update queries over TCP. + RFC2136UpdateProtocolTCP RFC2136UpdateProtocol = "TCP" + // RFC2136UpdateProtocolUDP sends dynamic DNS update queries over UDP. + RFC2136UpdateProtocolUDP RFC2136UpdateProtocol = "UDP" +) + // ACMEIssuerDNS01ProviderWebhook specifies configuration for a webhook DNS01 // provider, including where to POST ChallengePayload resources. type ACMEIssuerDNS01ProviderWebhook struct { diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go index c03a6a903..e7e199c31 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go @@ -25,9 +25,14 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state" +// +kubebuilder:printcolumn:name="Issuer",type="string",JSONPath=".spec.issuerRef.name",priority=1 +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,categories={cert-manager,cert-manager-acme} +// +kubebuilder:subresource:status // Order is a type to represent an Order with an ACME server -// +k8s:openapi-gen=true type Order struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` @@ -58,7 +63,7 @@ type OrderSpec struct { // If the Issuer does not exist, processing will be retried. // If the Issuer is not an 'ACME' Issuer, an error will be returned and the // Order will be marked as failed. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` // CommonName is the common name as specified on the DER encoded CSR. // If specified, this value must also be present in `dnsNames` or `ipAddresses`.
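A minimal sketch of how a caller might use the two API changes in the hunks above: the new RFC2136 `protocol` field and the `cmmeta.IssuerReference` type that replaces `cmmeta.ObjectReference` on `issuerRef` fields (the compatibility alias appears in the pkg/apis/meta/v1 hunk later in this patch). The `Nameserver` field is assumed from the surrounding upstream type; it is not shown in this hunk.

```go
package main

import (
	"fmt"

	cmacme "github.com/cert-manager/cert-manager/pkg/apis/acme/v1"
	cmmeta "github.com/cert-manager/cert-manager/pkg/apis/meta/v1"
)

func main() {
	// RFC2136 DNS01 provider: the new Protocol field selects TCP for dynamic
	// DNS update queries; leaving it unset keeps the UDP default.
	provider := cmacme.ACMEIssuerDNS01ProviderRFC2136{
		Nameserver:    "10.96.0.53:53", // assumed field name, not part of this hunk
		TSIGAlgorithm: "HMACSHA256",
		Protocol:      cmacme.RFC2136UpdateProtocolTCP,
	}

	// issuerRef fields are now typed as cmmeta.IssuerReference; existing code
	// that still names cmmeta.ObjectReference keeps compiling via the alias.
	ref := cmmeta.IssuerReference{
		Name:  "letsencrypt-prod",
		Kind:  "ClusterIssuer",   // defaults to 'Issuer' when omitted
		Group: "cert-manager.io", // defaults to 'cert-manager.io' when omitted
	}
	var legacy cmmeta.ObjectReference = ref // type alias: no conversion needed

	fmt.Println(provider.Protocol, legacy.Name)
}
```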
@@ -69,13 +74,15 @@ type OrderSpec struct { // DNSNames is a list of DNS names that should be included as part of the Order // validation process. // This field must match the corresponding field on the DER encoded CSR. - //+optional + // +optional + // +listType=atomic DNSNames []string `json:"dnsNames,omitempty"` // IPAddresses is a list of IP addresses that should be included as part of the Order // validation process. // This field must match the corresponding field on the DER encoded CSR. // +optional + // +listType=atomic IPAddresses []string `json:"ipAddresses,omitempty"` // Duration is the duration for the not after date for the requested certificate. @@ -106,6 +113,7 @@ type OrderStatus struct { // authorizations must be completed in order to validate the DNS names // specified on the Order. // +optional + // +listType=atomic Authorizations []ACMEAuthorization `json:"authorizations,omitempty"` // Certificate is a copy of the PEM encoded certificate for this Order. @@ -166,6 +174,7 @@ type ACMEAuthorization struct { // name and an appropriate Challenge resource will be created to perform // the ACME challenge process. // +optional + // +listType=atomic Challenges []ACMEChallenge `json:"challenges,omitempty"` } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go index 09f27f5cc..e1b4500da 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go @@ -321,6 +321,36 @@ func (in *ACMEChallengeSolverHTTP01IngressPodObjectMeta) DeepCopy() *ACMEChallen return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01IngressPodResources) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodResources) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressPodResources. +func (in *ACMEChallengeSolverHTTP01IngressPodResources) DeepCopy() *ACMEChallengeSolverHTTP01IngressPodResources { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressPodResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ACMEChallengeSolverHTTP01IngressPodSecurityContext) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodSecurityContext) { *out = *in @@ -414,6 +444,11 @@ func (in *ACMEChallengeSolverHTTP01IngressPodSpec) DeepCopyInto(out *ACMEChallen *out = new(ACMEChallengeSolverHTTP01IngressPodSecurityContext) (*in).DeepCopyInto(*out) } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ACMEChallengeSolverHTTP01IngressPodResources) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go index 348211c68..3830f7af3 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go @@ -16,6 +16,7 @@ limitations under the License. // Package v1 is the v1 version of the API. // +k8s:deepcopy-gen=package,register +// +k8s:openapi-gen=true // +groupName=cert-manager.io // +groupGoName=Certmanager package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go index 3208068c9..bc5475a32 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go @@ -27,6 +27,13 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Secret",type="string",JSONPath=`.spec.secretName` +// +kubebuilder:printcolumn:name="Issuer",type="string",JSONPath=`.spec.issuerRef.name`,priority=1 +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,shortName={cert,certs},categories=cert-manager +// +kubebuilder:subresource:status // A Certificate resource should be created to ensure an up to date and signed // X.509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. @@ -200,14 +207,17 @@ type CertificateSpec struct { // Requested DNS subject alternative names. // +optional + // +listType=atomic DNSNames []string `json:"dnsNames,omitempty"` // Requested IP address subject alternative names. // +optional + // +listType=atomic IPAddresses []string `json:"ipAddresses,omitempty"` // Requested URI subject alternative names. // +optional + // +listType=atomic URIs []string `json:"uris,omitempty"` // `otherNames` is an escape hatch for SAN that allows any type. We currently restrict the support to string like otherNames, cf RFC 5280 p 37 @@ -215,10 +225,12 @@ type CertificateSpec struct { // Most commonly this would be UPN set with oid: 1.3.6.1.4.1.311.20.2.3 // You should ensure that any OID passed is valid for the UTF8String type as we do not explicitly validate this. 
// +optional + // +listType=atomic OtherNames []OtherName `json:"otherNames,omitempty"` // Requested email subject alternative names. // +optional + // +listType=atomic EmailAddresses []string `json:"emailAddresses,omitempty"` // Name of the Secret resource that will be automatically created and @@ -245,7 +257,7 @@ type CertificateSpec struct { // from any namespace. // // The `name` field of the reference must always be specified. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` // Requested basic constraints isCA value. // The isCA value is used to set the `isCA` field on the created CertificateRequest @@ -264,6 +276,7 @@ type CertificateSpec struct { // // If unset, defaults to `digital signature` and `key encipherment`. // +optional + // +listType=atomic Usages []KeyUsage `json:"usages,omitempty"` // Private key options. These include the key algorithm and size, the used @@ -299,6 +312,7 @@ type CertificateSpec struct { // Defines extra output formats of the private key and signed certificate chain // to be written to this Certificate's target Secret. // +optional + // +listType=atomic AdditionalOutputFormats []CertificateAdditionalOutputFormat `json:"additionalOutputFormats,omitempty"` // x.509 certificate NameConstraint extension which MUST NOT be used in a non-CA certificate. @@ -439,24 +453,31 @@ type CertificateAdditionalOutputFormat struct { type X509Subject struct { // Organizations to be used on the Certificate. // +optional + // +listType=atomic Organizations []string `json:"organizations,omitempty"` // Countries to be used on the Certificate. // +optional + // +listType=atomic Countries []string `json:"countries,omitempty"` // Organizational Units to be used on the Certificate. // +optional + // +listType=atomic OrganizationalUnits []string `json:"organizationalUnits,omitempty"` // Cities to be used on the Certificate. // +optional + // +listType=atomic Localities []string `json:"localities,omitempty"` // State/Provinces to be used on the Certificate. // +optional + // +listType=atomic Provinces []string `json:"provinces,omitempty"` // Street addresses to be used on the Certificate. // +optional + // +listType=atomic StreetAddresses []string `json:"streetAddresses,omitempty"` // Postal codes to be used on the Certificate. // +optional + // +listType=atomic PostalCodes []string `json:"postalCodes,omitempty"` // Serial number to be used on the Certificate. // +optional @@ -568,9 +589,9 @@ const ( type CertificateStatus struct { // List of status conditions to indicate the status of certificates. // Known condition types are `Ready` and `Issuing`. + // +optional // +listType=map // +listMapKey=type - // +optional Conditions []CertificateCondition `json:"conditions,omitempty"` // LastFailureTime is set only if the latest issuance for this @@ -729,18 +750,22 @@ type NameConstraintItem struct { // DNSDomains is a list of DNS domains that are permitted or excluded. // // +optional + // +listType=atomic DNSDomains []string `json:"dnsDomains,omitempty"` // IPRanges is a list of IP Ranges that are permitted or excluded. // This should be a valid CIDR notation. // // +optional + // +listType=atomic IPRanges []string `json:"ipRanges,omitempty"` // EmailAddresses is a list of Email Addresses that are permitted or excluded. // // +optional + // +listType=atomic EmailAddresses []string `json:"emailAddresses,omitempty"` // URIDomains is a list of URI domains that are permitted or excluded. 
// // +optional + // +listType=atomic URIDomains []string `json:"uriDomains,omitempty"` } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go index 8f31d84c0..a948f1129 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go @@ -45,6 +45,15 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Approved",type="string",JSONPath=`.status.conditions[?(@.type == "Approved")].status` +// +kubebuilder:printcolumn:name="Denied",type="string",JSONPath=`.status.conditions[?(@.type == "Denied")].status` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Issuer",type="string",JSONPath=`.spec.issuerRef.name` +// +kubebuilder:printcolumn:name="Requester",type="string",JSONPath=`.spec.username` +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,shortName={cr,crs},categories=cert-manager +// +kubebuilder:subresource:status // A CertificateRequest is used to request a signed certificate from one of the // configured issuers. @@ -55,7 +64,6 @@ const ( // // A CertificateRequest is a one-shot resource, meaning it represents a single // point in time request for a certificate and cannot be re-used. -// +k8s:openapi-gen=true type CertificateRequest struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -110,7 +118,7 @@ type CertificateRequestSpec struct { // from any namespace. // // The `name` field of the reference must always be specified. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` // The PEM-encoded X.509 certificate signing request to be submitted to the // issuer for signing. @@ -143,6 +151,7 @@ type CertificateRequestSpec struct { // // If unset, defaults to `digital signature` and `key encipherment`. // +optional + // +listType=atomic Usages []KeyUsage `json:"usages,omitempty"` // Username contains the name of the user that created the CertificateRequest. @@ -155,8 +164,8 @@ type CertificateRequestSpec struct { UID string `json:"uid,omitempty"` // Groups contains group membership of the user that created the CertificateRequest. // Populated by the cert-manager webhook on creation and immutable. - // +listType=atomic // +optional + // +listType=atomic Groups []string `json:"groups,omitempty"` // Extra contains extra attributes of the user that created the CertificateRequest. // Populated by the cert-manager webhook on creation and immutable. @@ -169,9 +178,9 @@ type CertificateRequestSpec struct { type CertificateRequestStatus struct { // List of status conditions to indicate the status of a CertificateRequest. 
// Known condition types are `Ready`, `InvalidRequest`, `Approved` and `Denied`. + // +optional // +listType=map // +listMapKey=type - // +optional Conditions []CertificateRequestCondition `json:"conditions,omitempty"` // The PEM encoded X.509 certificate resulting from the certificate diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go index 90a55fcea..1cbd93f95 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go @@ -25,9 +25,13 @@ import ( // +genclient // +genclient:nonNamespaced -// +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Cluster,shortName=ciss,categories=cert-manager +// +kubebuilder:subresource:status // A ClusterIssuer represents a certificate issuing authority which can be // referenced as part of `issuerRef` fields. @@ -57,9 +61,13 @@ type ClusterIssuerList struct { } // +genclient -// +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,shortName=iss,categories=cert-manager +// +kubebuilder:subresource:status // An Issuer represents a certificate issuing authority which can be // referenced as part of `issuerRef` fields. @@ -188,6 +196,7 @@ type SelfSignedIssuer struct { // the location of the CRL from which the revocation of this certificate can be checked. // If not set certificate will be issued without CDP. Values are strings. // +optional + // +listType=atomic CRLDistributionPoints []string `json:"crlDistributionPoints,omitempty"` } @@ -348,6 +357,7 @@ type ServiceAccountRef struct { // TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token // consisting of the issuer's namespace and name is always included. // +optional + // +listType=atomic TokenAudiences []string `json:"audiences,omitempty"` } @@ -360,6 +370,7 @@ type CAIssuer struct { // the location of the CRL from which the revocation of this certificate can be checked. 
// If not set, certificates will be issued without distribution points set. // +optional + // +listType=atomic CRLDistributionPoints []string `json:"crlDistributionPoints,omitempty"` // The OCSP server list is an X.509 v3 extension that defines a list of @@ -368,12 +379,14 @@ type CAIssuer struct { // certificate will be issued with no OCSP servers set. For example, an // OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". // +optional + // +listType=atomic OCSPServers []string `json:"ocspServers,omitempty"` // IssuingCertificateURLs is a list of URLs which this issuer should embed into certificates // it creates. See https://www.rfc-editor.org/rfc/rfc5280#section-4.2.2.1 for more details. // As an example, such a URL might be "http://ca.domain.com/ca.crt". // +optional + // +listType=atomic IssuingCertificateURLs []string `json:"issuingCertificateURLs,omitempty"` } @@ -381,9 +394,9 @@ type CAIssuer struct { type IssuerStatus struct { // List of status conditions to indicate the status of a CertificateRequest. // Known condition types are `Ready`. + // +optional // +listType=map // +listMapKey=type - // +optional Conditions []IssuerCondition `json:"conditions,omitempty"` // ACME specific status options. diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go index f391663af..378fecfb2 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go @@ -14,8 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=meta.cert-manager.io - // Package meta contains meta types for cert-manager APIs package meta diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go index 9a673685d..7b5d3d1fb 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go @@ -16,6 +16,6 @@ limitations under the License. // Package v1 contains meta types for cert-manager APIs // +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true // +gencrdrefdocs:force -// +groupName=meta.cert-manager.io package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go index 80723a6c0..2b294e1a9 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go @@ -48,18 +48,24 @@ type LocalObjectReference struct { Name string `json:"name"` } -// ObjectReference is a reference to an object with a given name, kind and group. -type ObjectReference struct { - // Name of the resource being referred to. +// IssuerReference is a reference to a certificate issuer object with a given name, kind and group. +type IssuerReference struct { + // Name of the issuer being referred to. Name string `json:"name"` - // Kind of the resource being referred to. + // Kind of the issuer being referred to. + // Defaults to 'Issuer'. // +optional Kind string `json:"kind,omitempty"` - // Group of the resource being referred to. + // Group of the issuer being referred to. + // Defaults to 'cert-manager.io'. 
// +optional Group string `json:"group,omitempty"` } +// ObjectReference is a reference to an object with a given name, kind and group. +// Deprecated: Use IssuerReference instead. +type ObjectReference = IssuerReference + // A reference to a specific 'key' within a Secret resource. // In some instances, `key` is a required field. type SecretKeySelector struct { diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go index 9fa10e5e6..0d4af0708 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -22,33 +22,33 @@ limitations under the License. package v1 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { +func (in *IssuerReference) DeepCopyInto(out *IssuerReference) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. -func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerReference. +func (in *IssuerReference) DeepCopy() *IssuerReference { if in == nil { return nil } - out := new(LocalObjectReference) + out := new(IssuerReference) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. -func (in *ObjectReference) DeepCopy() *ObjectReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. 
+func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { if in == nil { return nil } - out := new(ObjectReference) + out := new(LocalObjectReference) in.DeepCopyInto(out) return out } diff --git a/vendor/github.com/cucumber/gherkin/go/v26/.gitignore b/vendor/github.com/cucumber/gherkin/go/v26/.gitignore new file mode 100644 index 000000000..7b0ee7aeb --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/.gitignore @@ -0,0 +1,17 @@ +.built +.compared +.deps +.dist +.dist-compressed +.go-get +.gofmt +.linted +.tested* +acceptance/ +bin/ +dist/ +dist_compressed/ +*.bin +*.iml +# upx dist/cucumber-gherkin-openbsd-386 fails with a core dump +core.*.!usr!bin!upx-ucl diff --git a/vendor/github.com/cucumber/gherkin/go/v26/LICENSE b/vendor/github.com/cucumber/gherkin/go/v26/LICENSE new file mode 100644 index 000000000..29e136102 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Cucumber Ltd, Gaspar Nagy, Björn Rasmusson, Peter Sergeant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/cucumber/gherkin/go/v26/Makefile b/vendor/github.com/cucumber/gherkin/go/v26/Makefile new file mode 100644 index 000000000..d7d420854 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/Makefile @@ -0,0 +1,83 @@ +SHELL := /usr/bin/env bash + +GHERKIN_LANGUAGES_JSON = dialects_builtin.go +GHERKIN_PARSER = parser.go +GHERKIN_RAZOR = parser.go.razor +SOURCE_FILES = $(shell find . 
-name "*.go" | grep -v $(GHERKIN_PARSER)) + +GHERKIN = bin/gherkin +GHERKIN_GENERATE_TOKENS = bin/gherkin-generate-tokens + +GOOD_FEATURE_FILES = $(shell find ../testdata/good -name "*.feature") +BAD_FEATURE_FILES = $(shell find ../testdata/bad -name "*.feature") + +TOKENS = $(patsubst ../testdata/%,acceptance/testdata/%.tokens,$(GOOD_FEATURE_FILES)) +ASTS = $(patsubst ../testdata/%,acceptance/testdata/%.ast.ndjson,$(GOOD_FEATURE_FILES)) +PICKLES = $(patsubst ../testdata/%,acceptance/testdata/%.pickles.ndjson,$(GOOD_FEATURE_FILES)) +SOURCES = $(patsubst ../testdata/%,acceptance/testdata/%.source.ndjson,$(GOOD_FEATURE_FILES)) +ERRORS = $(patsubst ../testdata/%,acceptance/testdata/%.errors.ndjson,$(BAD_FEATURE_FILES)) + +.DEFAULT_GOAL = help + +help: ## Show this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \n\nWhere is one of:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +generate: $(GHERKIN_PARSER) ## Generate gherkin parser files + +clean-generate: ## Remove generated Gherkin parser files ## Generate gherkin parser files + rm -f $(GHERKIN_PARSER) + +copy-gherkin-languages: $(GHERKIN_LANGUAGES_JSON) ## Copy gherkin-languages.json and/or generate derived files + +clean-gherkin-languages: ## Remove gherkin-languages.json and any derived files + rm -f $(GHERKIN_LANGUAGES_JSON) + +clean: ## Remove all build artifacts and files generated by the acceptance tests + rm -rf .built + rm -rf acceptance + rm -rf bin + +.DELETE_ON_ERROR: + +acceptance: .built $(TOKENS) $(ASTS) $(PICKLES) $(ERRORS) $(SOURCES) ## Build acceptance test dir and compare results with reference + +.built: bin/gherkin-generate-tokens bin/gherkin + touch $@ + +bin/gherkin-generate-tokens: + go build -o $@ ./gherkin-generate-tokens + +bin/gherkin: + go build -o $@ -a ./main + +dialects_builtin.go: ../gherkin-languages.json dialects_builtin.go.jq + cat $< | jq --sort-keys --from-file dialects_builtin.go.jq --raw-output --compact-output > $@ + +$(GHERKIN_PARSER): $(GHERKIN_RAZOR) ../gherkin.berp + berp -g ../gherkin.berp -t $< -o $@ --noBOM + gofmt -w $@ + +acceptance/testdata/%.tokens: ../testdata/% ../testdata/%.tokens + mkdir -p $(@D) + $(GHERKIN_GENERATE_TOKENS) $< > $@ + diff --unified $<.tokens $@ + +acceptance/testdata/%.ast.ndjson: ../testdata/% ../testdata/%.ast.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-source --no-pickles --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.ast.ndjson) <(jq "." $@) + +acceptance/testdata/%.pickles.ndjson: ../testdata/% ../testdata/%.pickles.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-source --no-ast --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.pickles.ndjson) <(jq "." $@) + +acceptance/testdata/%.source.ndjson: ../testdata/% ../testdata/%.source.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-ast --no-pickles --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.source.ndjson) <(jq "." $@) + +acceptance/testdata/%.errors.ndjson: ../testdata/% ../testdata/%.errors.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-source --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.errors.ndjson) <(jq "." 
$@) diff --git a/vendor/github.com/cucumber/gherkin/go/v26/README.md b/vendor/github.com/cucumber/gherkin/go/v26/README.md new file mode 100644 index 000000000..cdaba03c9 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/README.md @@ -0,0 +1,37 @@ +# Gherkin for Go + +[![GoDoc](https://pkg.go.dev/github.com/cucumber/gherkin/go?status.svg)](http://godoc.org/github.com/cucumber/gherkin/go) + +Gherkin parser/compiler for Go. Please see [Gherkin](https://github.com/cucumber/gherkin) for details. + +## Building + +You need Go installed (obviously). You also need to make sure your `PATH` +points to where Go installs packages: + +```bash +# Add go bin to path +export PATH=$(go env GOPATH)/bin:${PATH} +``` + +Now build it: + +``` +make .dist +``` + +You should have cross-compiled binaries in `./dist/`. + +## Compress binaries + +You need [upx](https://upx.github.io/) installed. + +``` +make .dist +make .dist-compressed +``` + +Your `./dist_compressed/` directory should now have compressed binaries. +Compression fails for some binaries, so you likely won't have a full set. + +The build copies the successfully compressed binaries back to `./dist/`. diff --git a/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go b/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go new file mode 100644 index 000000000..54853d02b --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go @@ -0,0 +1,453 @@ +package gherkin + +import ( + "github.com/cucumber/messages/go/v21" + "strings" +) + +type AstBuilder interface { + Builder + GetGherkinDocument() *messages.GherkinDocument +} + +type astBuilder struct { + stack []*astNode + comments []*messages.Comment + newId func() string +} + +func (t *astBuilder) Reset() { + t.comments = []*messages.Comment{} + t.stack = []*astNode{} + t.push(newAstNode(RuleTypeNone)) +} + +func (t *astBuilder) GetGherkinDocument() *messages.GherkinDocument { + res := t.currentNode().getSingle(RuleTypeGherkinDocument, nil) + if val, ok := res.(*messages.GherkinDocument); ok { + return val + } + return nil +} + +type astNode struct { + ruleType RuleType + subNodes map[RuleType][]interface{} +} + +func (a *astNode) add(rt RuleType, obj interface{}) { + a.subNodes[rt] = append(a.subNodes[rt], obj) +} + +func (a *astNode) getSingle(rt RuleType, defaultValue interface{}) interface{} { + if val, ok := a.subNodes[rt]; ok { + for i := range val { + return val[i] + } + } + return defaultValue +} + +func (a *astNode) getItems(rt RuleType) []interface{} { + var res []interface{} + if val, ok := a.subNodes[rt]; ok { + for i := range val { + res = append(res, val[i]) + } + } + return res +} + +func (a *astNode) getToken(tt TokenType) *Token { + if val, ok := a.getSingle(tt.RuleType(), nil).(*Token); ok { + return val + } + return nil +} + +func (a *astNode) getTokens(tt TokenType) []*Token { + var items = a.getItems(tt.RuleType()) + var tokens []*Token + for i := range items { + if val, ok := items[i].(*Token); ok { + tokens = append(tokens, val) + } + } + return tokens +} + +func (t *astBuilder) currentNode() *astNode { + if len(t.stack) > 0 { + return t.stack[len(t.stack)-1] + } + return nil +} + +func newAstNode(rt RuleType) *astNode { + return &astNode{ + ruleType: rt, + subNodes: make(map[RuleType][]interface{}), + } +} + +func NewAstBuilder(newId func() string) AstBuilder { + builder := new(astBuilder) + builder.newId = newId + builder.comments = []*messages.Comment{} + builder.push(newAstNode(RuleTypeNone)) + return builder +} + +func (t *astBuilder) push(n 
*astNode) { + t.stack = append(t.stack, n) +} + +func (t *astBuilder) pop() *astNode { + x := t.stack[len(t.stack)-1] + t.stack = t.stack[:len(t.stack)-1] + return x +} + +func (t *astBuilder) Build(tok *Token) (bool, error) { + if tok.Type == TokenTypeComment { + comment := &messages.Comment{ + Location: astLocation(tok), + Text: tok.Text, + } + t.comments = append(t.comments, comment) + } else { + t.currentNode().add(tok.Type.RuleType(), tok) + } + return true, nil +} + +func (t *astBuilder) StartRule(r RuleType) (bool, error) { + t.push(newAstNode(r)) + return true, nil +} + +func (t *astBuilder) EndRule(r RuleType) (bool, error) { + node := t.pop() + transformedNode, err := t.transformNode(node) + t.currentNode().add(node.ruleType, transformedNode) + return true, err +} + +func (t *astBuilder) transformNode(node *astNode) (interface{}, error) { + switch node.ruleType { + + case RuleTypeStep: + stepLine := node.getToken(TokenTypeStepLine) + + step := &messages.Step{ + Location: astLocation(stepLine), + Keyword: stepLine.Keyword, + KeywordType: stepLine.KeywordType, + Text: stepLine.Text, + Id: t.newId(), + } + dataTable := node.getSingle(RuleTypeDataTable, nil) + if dataTable != nil { + step.DataTable = dataTable.(*messages.DataTable) + } else { + docString := node.getSingle(RuleTypeDocString, nil) + if docString != nil { + step.DocString = docString.(*messages.DocString) + } + } + + return step, nil + + case RuleTypeDocString: + separatorToken := node.getToken(TokenTypeDocStringSeparator) + lineTokens := node.getTokens(TokenTypeOther) + var text string + for i := range lineTokens { + if i > 0 { + text += "\n" + } + text += lineTokens[i].Text + } + docString := &messages.DocString{ + Location: astLocation(separatorToken), + Content: text, + Delimiter: separatorToken.Keyword, + } + if len(separatorToken.Text) > 0 { + docString.MediaType = separatorToken.Text + } + + return docString, nil + + case RuleTypeDataTable: + rows, err := astTableRows(node, t.newId) + dt := &messages.DataTable{ + Location: rows[0].Location, + Rows: rows, + } + return dt, err + + case RuleTypeBackground: + backgroundLine := node.getToken(TokenTypeBackgroundLine) + bg := &messages.Background{ + Id: t.newId(), + Location: astLocation(backgroundLine), + Keyword: backgroundLine.Keyword, + Name: backgroundLine.Text, + Description: getDescription(node), + Steps: astSteps(node), + } + return bg, nil + + case RuleTypeScenarioDefinition: + scenarioNode := node.getSingle(RuleTypeScenario, nil).(*astNode) + scenarioLine := scenarioNode.getToken(TokenTypeScenarioLine) + tags := astTags(node, t.newId) + sc := &messages.Scenario{ + Id: t.newId(), + Tags: tags, + Location: astLocation(scenarioLine), + Keyword: scenarioLine.Keyword, + Name: scenarioLine.Text, + Description: getDescription(scenarioNode), + Steps: astSteps(scenarioNode), + Examples: astExamples(scenarioNode), + } + + return sc, nil + + case RuleTypeExamplesDefinition: + tags := astTags(node, t.newId) + examplesNode := node.getSingle(RuleTypeExamples, nil).(*astNode) + examplesLine := examplesNode.getToken(TokenTypeExamplesLine) + examplesTable := examplesNode.getSingle(RuleTypeExamplesTable, make([]*messages.TableRow, 0)).([]*messages.TableRow) + + var tableHeader *messages.TableRow + var tableBody []*messages.TableRow + + if len(examplesTable) > 0 { + tableHeader = examplesTable[0] + tableBody = examplesTable[1:] + } else { + tableHeader = nil + tableBody = examplesTable + } + + ex := &messages.Examples{ + Id: t.newId(), + Tags: tags, + Location: 
astLocation(examplesLine), + Keyword: examplesLine.Keyword, + Name: examplesLine.Text, + Description: getDescription(examplesNode), + TableHeader: tableHeader, + TableBody: tableBody, + } + return ex, nil + + case RuleTypeExamplesTable: + allRows, err := astTableRows(node, t.newId) + return allRows, err + + case RuleTypeDescription: + lineTokens := node.getTokens(TokenTypeOther) + // Trim trailing empty lines + end := len(lineTokens) + for end > 0 && strings.TrimSpace(lineTokens[end-1].Text) == "" { + end-- + } + var desc []string + for i := range lineTokens[0:end] { + desc = append(desc, lineTokens[i].Text) + } + return strings.Join(desc, "\n"), nil + + case RuleTypeFeature: + header := node.getSingle(RuleTypeFeatureHeader, nil).(*astNode) + tags := astTags(header, t.newId) + featureLine := header.getToken(TokenTypeFeatureLine) + if featureLine == nil { + return nil, nil + } + + children := make([]*messages.FeatureChild, 0) + background, _ := node.getSingle(RuleTypeBackground, nil).(*messages.Background) + if background != nil { + children = append(children, &messages.FeatureChild{ + Background: background, + }) + } + scenarios := node.getItems(RuleTypeScenarioDefinition) + for i := range scenarios { + scenario := scenarios[i].(*messages.Scenario) + children = append(children, &messages.FeatureChild{ + Scenario: scenario, + }) + } + rules := node.getItems(RuleTypeRule) + for i := range rules { + rule := rules[i].(*messages.Rule) + children = append(children, &messages.FeatureChild{ + Rule: rule, + }) + } + + feature := &messages.Feature{ + Tags: tags, + Location: astLocation(featureLine), + Language: featureLine.GherkinDialect, + Keyword: featureLine.Keyword, + Name: featureLine.Text, + Description: getDescription(header), + Children: children, + } + return feature, nil + + case RuleTypeRule: + header := node.getSingle(RuleTypeRuleHeader, nil).(*astNode) + ruleLine := header.getToken(TokenTypeRuleLine) + if ruleLine == nil { + return nil, nil + } + + tags := astTags(header, t.newId) + var children []*messages.RuleChild + background, _ := node.getSingle(RuleTypeBackground, nil).(*messages.Background) + + if background != nil { + children = append(children, &messages.RuleChild{ + Background: background, + }) + } + scenarios := node.getItems(RuleTypeScenarioDefinition) + for i := range scenarios { + scenario := scenarios[i].(*messages.Scenario) + children = append(children, &messages.RuleChild{ + Scenario: scenario, + }) + } + + rule := &messages.Rule{ + Id: t.newId(), + Location: astLocation(ruleLine), + Keyword: ruleLine.Keyword, + Name: ruleLine.Text, + Description: getDescription(header), + Children: children, + Tags: tags, + } + return rule, nil + + case RuleTypeGherkinDocument: + feature, _ := node.getSingle(RuleTypeFeature, nil).(*messages.Feature) + + doc := &messages.GherkinDocument{} + if feature != nil { + doc.Feature = feature + } + doc.Comments = t.comments + return doc, nil + } + return node, nil +} + +func getDescription(node *astNode) string { + return node.getSingle(RuleTypeDescription, "").(string) +} + +func astLocation(t *Token) *messages.Location { + return &messages.Location{ + Line: int64(t.Location.Line), + Column: int64(t.Location.Column), + } +} + +func astTableRows(t *astNode, newId func() string) (rows []*messages.TableRow, err error) { + rows = []*messages.TableRow{} + tokens := t.getTokens(TokenTypeTableRow) + for i := range tokens { + row := &messages.TableRow{ + Id: newId(), + Location: astLocation(tokens[i]), + Cells: astTableCells(tokens[i]), + } + rows = 
append(rows, row) + } + err = ensureCellCount(rows) + return +} + +func ensureCellCount(rows []*messages.TableRow) error { + if len(rows) <= 1 { + return nil + } + cellCount := len(rows[0].Cells) + for i := range rows { + if cellCount != len(rows[i].Cells) { + return &parseError{"inconsistent cell count within the table", &Location{ + Line: int(rows[i].Location.Line), + Column: int(rows[i].Location.Column), + }} + } + } + return nil +} + +func astTableCells(t *Token) (cells []*messages.TableCell) { + cells = []*messages.TableCell{} + for i := range t.Items { + item := t.Items[i] + cell := &messages.TableCell{} + cell.Location = &messages.Location{ + Line: int64(t.Location.Line), + Column: int64(item.Column), + } + cell.Value = item.Text + cells = append(cells, cell) + } + return +} + +func astSteps(t *astNode) (steps []*messages.Step) { + steps = []*messages.Step{} + tokens := t.getItems(RuleTypeStep) + for i := range tokens { + step, _ := tokens[i].(*messages.Step) + steps = append(steps, step) + } + return +} + +func astExamples(t *astNode) (examples []*messages.Examples) { + examples = []*messages.Examples{} + tokens := t.getItems(RuleTypeExamplesDefinition) + for i := range tokens { + example, _ := tokens[i].(*messages.Examples) + examples = append(examples, example) + } + return +} + +func astTags(node *astNode, newId func() string) (tags []*messages.Tag) { + tags = []*messages.Tag{} + tagsNode, ok := node.getSingle(RuleTypeTags, nil).(*astNode) + if !ok { + return + } + tokens := tagsNode.getTokens(TokenTypeTagLine) + for i := range tokens { + token := tokens[i] + for k := range token.Items { + item := token.Items[k] + tag := &messages.Tag{} + tag.Location = &messages.Location{ + Line: int64(token.Location.Line), + Column: int64(item.Column), + } + tag.Name = item.Text + tag.Id = newId() + tags = append(tags, tag) + } + } + return +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialect.go b/vendor/github.com/cucumber/gherkin/go/v26/dialect.go new file mode 100644 index 000000000..212df62b2 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/dialect.go @@ -0,0 +1,58 @@ +package gherkin + +import messages "github.com/cucumber/messages/go/v21" + +type Dialect struct { + Language string + Name string + Native string + Keywords map[string][]string + KeywordTypes map[string]messages.StepKeywordType +} + +func (g *Dialect) FeatureKeywords() []string { + return g.Keywords["feature"] +} + +func (g *Dialect) RuleKeywords() []string { + return g.Keywords["rule"] +} + +func (g *Dialect) ScenarioKeywords() []string { + return g.Keywords["scenario"] +} + +func (g *Dialect) StepKeywords() []string { + result := g.Keywords["given"] + result = append(result, g.Keywords["when"]...) + result = append(result, g.Keywords["then"]...) + result = append(result, g.Keywords["and"]...) + result = append(result, g.Keywords["but"]...) 
+ return result +} + +func (g *Dialect) BackgroundKeywords() []string { + return g.Keywords["background"] +} + +func (g *Dialect) ScenarioOutlineKeywords() []string { + return g.Keywords["scenarioOutline"] +} + +func (g *Dialect) ExamplesKeywords() []string { + return g.Keywords["examples"] +} + +func (g *Dialect) StepKeywordType(keyword string) messages.StepKeywordType { + return g.KeywordTypes[keyword] +} + +type DialectProvider interface { + GetDialect(language string) *Dialect +} + +type gherkinDialectMap map[string]*Dialect + +func (g gherkinDialectMap) GetDialect(language string) *Dialect { + return g[language] +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go new file mode 100644 index 000000000..2362612d8 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go @@ -0,0 +1,5229 @@ +package gherkin + +import messages "github.com/cucumber/messages/go/v21" + +// Builtin dialects for af (Afrikaans), am (Armenian), an (Aragonese), ar (Arabic), ast (Asturian), az (Azerbaijani), be (Belarusian), bg (Bulgarian), bm (Malay), bs (Bosnian), ca (Catalan), cs (Czech), cy-GB (Welsh), da (Danish), de (German), el (Greek), em (Emoji), en (English), en-Scouse (Scouse), en-au (Australian), en-lol (LOLCAT), en-old (Old English), en-pirate (Pirate), en-tx (Texas), eo (Esperanto), es (Spanish), et (Estonian), fa (Persian), fi (Finnish), fr (French), ga (Irish), gj (Gujarati), gl (Galician), he (Hebrew), hi (Hindi), hr (Croatian), ht (Creole), hu (Hungarian), id (Indonesian), is (Icelandic), it (Italian), ja (Japanese), jv (Javanese), ka (Georgian), kn (Kannada), ko (Korean), lt (Lithuanian), lu (Luxemburgish), lv (Latvian), mk-Cyrl (Macedonian), mk-Latn (Macedonian (Latin)), mn (Mongolian), ne (Nepali), nl (Dutch), no (Norwegian), pa (Panjabi), pl (Polish), pt (Portuguese), ro (Romanian), ru (Russian), sk (Slovak), sl (Slovenian), sr-Cyrl (Serbian), sr-Latn (Serbian (Latin)), sv (Swedish), ta (Tamil), th (Thai), te (Telugu), tlh (Klingon), tr (Turkish), tt (Tatar), uk (Ukrainian), ur (Urdu), uz (Uzbek), vi (Vietnamese), zh-CN (Chinese simplified), zh-TW (Chinese traditional), mr (Marathi), amh (Amharic) +func DialectsBuiltin() DialectProvider { + return builtinDialects +} + +const ( + feature = "feature" + rule = "rule" + background = "background" + scenario = "scenario" + scenarioOutline = "scenarioOutline" + examples = "examples" + given = "given" + when = "when" + then = "then" + and = "and" + but = "but" +) + +var builtinDialects = gherkinDialectMap{ + "af": &Dialect{ + "af", "Afrikaans", "Afrikaans", map[string][]string{ + feature: { + "Funksie", + "Besigheid Behoefte", + "Vermoë", + }, + rule: { + "Regel", + }, + background: { + "Agtergrond", + }, + scenario: { + "Voorbeeld", + "Situasie", + }, + scenarioOutline: { + "Situasie Uiteensetting", + }, + examples: { + "Voorbeelde", + }, + given: { + "* ", + "Gegewe ", + }, + when: { + "* ", + "Wanneer ", + }, + then: { + "* ", + "Dan ", + }, + and: { + "* ", + "En ", + }, + but: { + "* ", + "Maar ", + }, + }, + map[string]messages.StepKeywordType{ + "Gegewe ": messages.StepKeywordType_CONTEXT, + + "Wanneer ": messages.StepKeywordType_ACTION, + + "Dan ": messages.StepKeywordType_OUTCOME, + + "En ": messages.StepKeywordType_CONJUNCTION, + + "Maar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "am": &Dialect{ + "am", "Armenian", "հայերեն", map[string][]string{ + feature: { + 
"Ֆունկցիոնալություն", + "Հատկություն", + }, + rule: { + "Rule", + }, + background: { + "Կոնտեքստ", + }, + scenario: { + "Օրինակ", + "Սցենար", + }, + scenarioOutline: { + "Սցենարի կառուցվացքը", + }, + examples: { + "Օրինակներ", + }, + given: { + "* ", + "Դիցուք ", + }, + when: { + "* ", + "Եթե ", + "Երբ ", + }, + then: { + "* ", + "Ապա ", + }, + and: { + "* ", + "Եվ ", + }, + but: { + "* ", + "Բայց ", + }, + }, + map[string]messages.StepKeywordType{ + "Դիցուք ": messages.StepKeywordType_CONTEXT, + + "Եթե ": messages.StepKeywordType_ACTION, + + "Երբ ": messages.StepKeywordType_ACTION, + + "Ապա ": messages.StepKeywordType_OUTCOME, + + "Եվ ": messages.StepKeywordType_CONJUNCTION, + + "Բայց ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "an": &Dialect{ + "an", "Aragonese", "Aragonés", map[string][]string{ + feature: { + "Caracteristica", + }, + rule: { + "Rule", + }, + background: { + "Antecedents", + }, + scenario: { + "Eixemplo", + "Caso", + }, + scenarioOutline: { + "Esquema del caso", + }, + examples: { + "Eixemplos", + }, + given: { + "* ", + "Dau ", + "Dada ", + "Daus ", + "Dadas ", + }, + when: { + "* ", + "Cuan ", + }, + then: { + "* ", + "Alavez ", + "Allora ", + "Antonces ", + }, + and: { + "* ", + "Y ", + "E ", + }, + but: { + "* ", + "Pero ", + }, + }, + map[string]messages.StepKeywordType{ + "Dau ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Daus ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Cuan ": messages.StepKeywordType_ACTION, + + "Alavez ": messages.StepKeywordType_OUTCOME, + + "Allora ": messages.StepKeywordType_OUTCOME, + + "Antonces ": messages.StepKeywordType_OUTCOME, + + "Y ": messages.StepKeywordType_CONJUNCTION, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Pero ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ar": &Dialect{ + "ar", "Arabic", "العربية", map[string][]string{ + feature: { + "خاصية", + }, + rule: { + "Rule", + }, + background: { + "الخلفية", + }, + scenario: { + "مثال", + "سيناريو", + }, + scenarioOutline: { + "سيناريو مخطط", + }, + examples: { + "امثلة", + }, + given: { + "* ", + "بفرض ", + }, + when: { + "* ", + "متى ", + "عندما ", + }, + then: { + "* ", + "اذاً ", + "ثم ", + }, + and: { + "* ", + "و ", + }, + but: { + "* ", + "لكن ", + }, + }, + map[string]messages.StepKeywordType{ + "بفرض ": messages.StepKeywordType_CONTEXT, + + "متى ": messages.StepKeywordType_ACTION, + + "عندما ": messages.StepKeywordType_ACTION, + + "اذاً ": messages.StepKeywordType_OUTCOME, + + "ثم ": messages.StepKeywordType_OUTCOME, + + "و ": messages.StepKeywordType_CONJUNCTION, + + "لكن ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ast": &Dialect{ + "ast", "Asturian", "asturianu", map[string][]string{ + feature: { + "Carauterística", + }, + rule: { + "Rule", + }, + background: { + "Antecedentes", + }, + scenario: { + "Exemplo", + "Casu", + }, + scenarioOutline: { + "Esbozu del casu", + }, + examples: { + "Exemplos", + }, + given: { + "* ", + "Dáu ", + "Dada ", + "Daos ", + "Daes ", + }, + when: { + "* ", + "Cuando ", + }, + then: { + "* ", + "Entós ", + }, + and: { + "* ", + "Y ", + "Ya ", + }, + but: { + "* ", + "Peru ", + }, + }, + map[string]messages.StepKeywordType{ + "Dáu ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Daos ": messages.StepKeywordType_CONTEXT, + + "Daes ": 
messages.StepKeywordType_CONTEXT, + + "Cuando ": messages.StepKeywordType_ACTION, + + "Entós ": messages.StepKeywordType_OUTCOME, + + "Y ": messages.StepKeywordType_CONJUNCTION, + + "Ya ": messages.StepKeywordType_CONJUNCTION, + + "Peru ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "az": &Dialect{ + "az", "Azerbaijani", "Azərbaycanca", map[string][]string{ + feature: { + "Özəllik", + }, + rule: { + "Rule", + }, + background: { + "Keçmiş", + "Kontekst", + }, + scenario: { + "Nümunə", + "Ssenari", + }, + scenarioOutline: { + "Ssenarinin strukturu", + }, + examples: { + "Nümunələr", + }, + given: { + "* ", + "Tutaq ki ", + "Verilir ", + }, + when: { + "* ", + "Əgər ", + "Nə vaxt ki ", + }, + then: { + "* ", + "O halda ", + }, + and: { + "* ", + "Və ", + "Həm ", + }, + but: { + "* ", + "Amma ", + "Ancaq ", + }, + }, + map[string]messages.StepKeywordType{ + "Tutaq ki ": messages.StepKeywordType_CONTEXT, + + "Verilir ": messages.StepKeywordType_CONTEXT, + + "Əgər ": messages.StepKeywordType_ACTION, + + "Nə vaxt ki ": messages.StepKeywordType_ACTION, + + "O halda ": messages.StepKeywordType_OUTCOME, + + "Və ": messages.StepKeywordType_CONJUNCTION, + + "Həm ": messages.StepKeywordType_CONJUNCTION, + + "Amma ": messages.StepKeywordType_CONJUNCTION, + + "Ancaq ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "be": &Dialect{ + "be", "Belarusian", "Беларуская", map[string][]string{ + feature: { + "Функцыянальнасць", + "Фіча", + }, + rule: { + "Правілы", + }, + background: { + "Кантэкст", + }, + scenario: { + "Сцэнарый", + "Cцэнар", + }, + scenarioOutline: { + "Шаблон сцэнарыя", + "Узор сцэнара", + }, + examples: { + "Прыклады", + }, + given: { + "* ", + "Няхай ", + "Дадзена ", + }, + when: { + "* ", + "Калі ", + }, + then: { + "* ", + "Тады ", + }, + and: { + "* ", + "I ", + "Ды ", + "Таксама ", + }, + but: { + "* ", + "Але ", + "Інакш ", + }, + }, + map[string]messages.StepKeywordType{ + "Няхай ": messages.StepKeywordType_CONTEXT, + + "Дадзена ": messages.StepKeywordType_CONTEXT, + + "Калі ": messages.StepKeywordType_ACTION, + + "Тады ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ды ": messages.StepKeywordType_CONJUNCTION, + + "Таксама ": messages.StepKeywordType_CONJUNCTION, + + "Але ": messages.StepKeywordType_CONJUNCTION, + + "Інакш ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "bg": &Dialect{ + "bg", "Bulgarian", "български", map[string][]string{ + feature: { + "Функционалност", + }, + rule: { + "Правило", + }, + background: { + "Предистория", + }, + scenario: { + "Пример", + "Сценарий", + }, + scenarioOutline: { + "Рамка на сценарий", + }, + examples: { + "Примери", + }, + given: { + "* ", + "Дадено ", + }, + when: { + "* ", + "Когато ", + }, + then: { + "* ", + "То ", + }, + and: { + "* ", + "И ", + }, + but: { + "* ", + "Но ", + }, + }, + map[string]messages.StepKeywordType{ + "Дадено ": messages.StepKeywordType_CONTEXT, + + "Когато ": messages.StepKeywordType_ACTION, + + "То ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "Но ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "bm": &Dialect{ + "bm", "Malay", "Bahasa Melayu", map[string][]string{ + feature: { + "Fungsi", + }, + rule: { + "Rule", + }, + background: { + "Latar Belakang", + }, + scenario: { + "Senario", + "Situasi", + "Keadaan", + }, + scenarioOutline: { + "Kerangka 
Senario", + "Kerangka Situasi", + "Kerangka Keadaan", + "Garis Panduan Senario", + }, + examples: { + "Contoh", + }, + given: { + "* ", + "Diberi ", + "Bagi ", + }, + when: { + "* ", + "Apabila ", + }, + then: { + "* ", + "Maka ", + "Kemudian ", + }, + and: { + "* ", + "Dan ", + }, + but: { + "* ", + "Tetapi ", + "Tapi ", + }, + }, + map[string]messages.StepKeywordType{ + "Diberi ": messages.StepKeywordType_CONTEXT, + + "Bagi ": messages.StepKeywordType_CONTEXT, + + "Apabila ": messages.StepKeywordType_ACTION, + + "Maka ": messages.StepKeywordType_OUTCOME, + + "Kemudian ": messages.StepKeywordType_OUTCOME, + + "Dan ": messages.StepKeywordType_CONJUNCTION, + + "Tetapi ": messages.StepKeywordType_CONJUNCTION, + + "Tapi ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "bs": &Dialect{ + "bs", "Bosnian", "Bosanski", map[string][]string{ + feature: { + "Karakteristika", + }, + rule: { + "Rule", + }, + background: { + "Pozadina", + }, + scenario: { + "Primjer", + "Scenariju", + "Scenario", + }, + scenarioOutline: { + "Scenariju-obris", + "Scenario-outline", + }, + examples: { + "Primjeri", + }, + given: { + "* ", + "Dato ", + }, + when: { + "* ", + "Kada ", + }, + then: { + "* ", + "Zatim ", + }, + and: { + "* ", + "I ", + "A ", + }, + but: { + "* ", + "Ali ", + }, + }, + map[string]messages.StepKeywordType{ + "Dato ": messages.StepKeywordType_CONTEXT, + + "Kada ": messages.StepKeywordType_ACTION, + + "Zatim ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "Ali ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ca": &Dialect{ + "ca", "Catalan", "català", map[string][]string{ + feature: { + "Característica", + "Funcionalitat", + }, + rule: { + "Rule", + }, + background: { + "Rerefons", + "Antecedents", + }, + scenario: { + "Exemple", + "Escenari", + }, + scenarioOutline: { + "Esquema de l'escenari", + }, + examples: { + "Exemples", + }, + given: { + "* ", + "Donat ", + "Donada ", + "Atès ", + "Atesa ", + }, + when: { + "* ", + "Quan ", + }, + then: { + "* ", + "Aleshores ", + "Cal ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "Però ", + }, + }, + map[string]messages.StepKeywordType{ + "Donat ": messages.StepKeywordType_CONTEXT, + + "Donada ": messages.StepKeywordType_CONTEXT, + + "Atès ": messages.StepKeywordType_CONTEXT, + + "Atesa ": messages.StepKeywordType_CONTEXT, + + "Quan ": messages.StepKeywordType_ACTION, + + "Aleshores ": messages.StepKeywordType_OUTCOME, + + "Cal ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Però ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "cs": &Dialect{ + "cs", "Czech", "Česky", map[string][]string{ + feature: { + "Požadavek", + }, + rule: { + "Pravidlo", + }, + background: { + "Pozadí", + "Kontext", + }, + scenario: { + "Příklad", + "Scénář", + }, + scenarioOutline: { + "Náčrt Scénáře", + "Osnova scénáře", + }, + examples: { + "Příklady", + }, + given: { + "* ", + "Pokud ", + "Za předpokladu ", + }, + when: { + "* ", + "Když ", + }, + then: { + "* ", + "Pak ", + }, + and: { + "* ", + "A také ", + "A ", + }, + but: { + "* ", + "Ale ", + }, + }, + map[string]messages.StepKeywordType{ + "Pokud ": messages.StepKeywordType_CONTEXT, + + "Za předpokladu ": messages.StepKeywordType_CONTEXT, + + "Když ": messages.StepKeywordType_ACTION, + + "Pak ": messages.StepKeywordType_OUTCOME, + + "A také ": 
messages.StepKeywordType_CONJUNCTION, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "Ale ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "cy-GB": &Dialect{ + "cy-GB", "Welsh", "Cymraeg", map[string][]string{ + feature: { + "Arwedd", + }, + rule: { + "Rule", + }, + background: { + "Cefndir", + }, + scenario: { + "Enghraifft", + "Scenario", + }, + scenarioOutline: { + "Scenario Amlinellol", + }, + examples: { + "Enghreifftiau", + }, + given: { + "* ", + "Anrhegedig a ", + }, + when: { + "* ", + "Pryd ", + }, + then: { + "* ", + "Yna ", + }, + and: { + "* ", + "A ", + }, + but: { + "* ", + "Ond ", + }, + }, + map[string]messages.StepKeywordType{ + "Anrhegedig a ": messages.StepKeywordType_CONTEXT, + + "Pryd ": messages.StepKeywordType_ACTION, + + "Yna ": messages.StepKeywordType_OUTCOME, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "Ond ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "da": &Dialect{ + "da", "Danish", "dansk", map[string][]string{ + feature: { + "Egenskab", + }, + rule: { + "Rule", + }, + background: { + "Baggrund", + }, + scenario: { + "Eksempel", + "Scenarie", + }, + scenarioOutline: { + "Abstrakt Scenario", + }, + examples: { + "Eksempler", + }, + given: { + "* ", + "Givet ", + }, + when: { + "* ", + "Når ", + }, + then: { + "* ", + "Så ", + }, + and: { + "* ", + "Og ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Givet ": messages.StepKeywordType_CONTEXT, + + "Når ": messages.StepKeywordType_ACTION, + + "Så ": messages.StepKeywordType_OUTCOME, + + "Og ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "de": &Dialect{ + "de", "German", "Deutsch", map[string][]string{ + feature: { + "Funktionalität", + "Funktion", + }, + rule: { + "Rule", + "Regel", + }, + background: { + "Grundlage", + "Hintergrund", + "Voraussetzungen", + "Vorbedingungen", + }, + scenario: { + "Beispiel", + "Szenario", + }, + scenarioOutline: { + "Szenariogrundriss", + "Szenarien", + }, + examples: { + "Beispiele", + }, + given: { + "* ", + "Angenommen ", + "Gegeben sei ", + "Gegeben seien ", + }, + when: { + "* ", + "Wenn ", + }, + then: { + "* ", + "Dann ", + }, + and: { + "* ", + "Und ", + }, + but: { + "* ", + "Aber ", + }, + }, + map[string]messages.StepKeywordType{ + "Angenommen ": messages.StepKeywordType_CONTEXT, + + "Gegeben sei ": messages.StepKeywordType_CONTEXT, + + "Gegeben seien ": messages.StepKeywordType_CONTEXT, + + "Wenn ": messages.StepKeywordType_ACTION, + + "Dann ": messages.StepKeywordType_OUTCOME, + + "Und ": messages.StepKeywordType_CONJUNCTION, + + "Aber ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "el": &Dialect{ + "el", "Greek", "Ελληνικά", map[string][]string{ + feature: { + "Δυνατότητα", + "Λειτουργία", + }, + rule: { + "Rule", + }, + background: { + "Υπόβαθρο", + }, + scenario: { + "Παράδειγμα", + "Σενάριο", + }, + scenarioOutline: { + "Περιγραφή Σεναρίου", + "Περίγραμμα Σεναρίου", + }, + examples: { + "Παραδείγματα", + "Σενάρια", + }, + given: { + "* ", + "Δεδομένου ", + }, + when: { + "* ", + "Όταν ", + }, + then: { + "* ", + "Τότε ", + }, + and: { + "* ", + "Και ", + }, + but: { + "* ", + "Αλλά ", + }, + }, + map[string]messages.StepKeywordType{ + "Δεδομένου ": messages.StepKeywordType_CONTEXT, + + "Όταν ": messages.StepKeywordType_ACTION, + + "Τότε ": messages.StepKeywordType_OUTCOME, + + "Και ": 
messages.StepKeywordType_CONJUNCTION, + + "Αλλά ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "em": &Dialect{ + "em", "Emoji", "😀", map[string][]string{ + feature: { + "📚", + }, + rule: { + "Rule", + }, + background: { + "💤", + }, + scenario: { + "🥒", + "📕", + }, + scenarioOutline: { + "📖", + }, + examples: { + "📓", + }, + given: { + "* ", + "😐", + }, + when: { + "* ", + "🎬", + }, + then: { + "* ", + "🙏", + }, + and: { + "* ", + "😂", + }, + but: { + "* ", + "😔", + }, + }, + map[string]messages.StepKeywordType{ + "😐": messages.StepKeywordType_CONTEXT, + + "🎬": messages.StepKeywordType_ACTION, + + "🙏": messages.StepKeywordType_OUTCOME, + + "😂": messages.StepKeywordType_CONJUNCTION, + + "😔": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en": &Dialect{ + "en", "English", "English", map[string][]string{ + feature: { + "Feature", + "Business Need", + "Ability", + }, + rule: { + "Rule", + }, + background: { + "Background", + }, + scenario: { + "Example", + "Scenario", + }, + scenarioOutline: { + "Scenario Outline", + "Scenario Template", + }, + examples: { + "Examples", + "Scenarios", + }, + given: { + "* ", + "Given ", + }, + when: { + "* ", + "When ", + }, + then: { + "* ", + "Then ", + }, + and: { + "* ", + "And ", + }, + but: { + "* ", + "But ", + }, + }, + map[string]messages.StepKeywordType{ + "Given ": messages.StepKeywordType_CONTEXT, + + "When ": messages.StepKeywordType_ACTION, + + "Then ": messages.StepKeywordType_OUTCOME, + + "And ": messages.StepKeywordType_CONJUNCTION, + + "But ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-Scouse": &Dialect{ + "en-Scouse", "Scouse", "Scouse", map[string][]string{ + feature: { + "Feature", + }, + rule: { + "Rule", + }, + background: { + "Dis is what went down", + }, + scenario: { + "The thing of it is", + }, + scenarioOutline: { + "Wharrimean is", + }, + examples: { + "Examples", + }, + given: { + "* ", + "Givun ", + "Youse know when youse got ", + }, + when: { + "* ", + "Wun ", + "Youse know like when ", + }, + then: { + "* ", + "Dun ", + "Den youse gotta ", + }, + and: { + "* ", + "An ", + }, + but: { + "* ", + "Buh ", + }, + }, + map[string]messages.StepKeywordType{ + "Givun ": messages.StepKeywordType_CONTEXT, + + "Youse know when youse got ": messages.StepKeywordType_CONTEXT, + + "Wun ": messages.StepKeywordType_ACTION, + + "Youse know like when ": messages.StepKeywordType_ACTION, + + "Dun ": messages.StepKeywordType_OUTCOME, + + "Den youse gotta ": messages.StepKeywordType_OUTCOME, + + "An ": messages.StepKeywordType_CONJUNCTION, + + "Buh ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-au": &Dialect{ + "en-au", "Australian", "Australian", map[string][]string{ + feature: { + "Pretty much", + }, + rule: { + "Rule", + }, + background: { + "First off", + }, + scenario: { + "Awww, look mate", + }, + scenarioOutline: { + "Reckon it's like", + }, + examples: { + "You'll wanna", + }, + given: { + "* ", + "Y'know ", + }, + when: { + "* ", + "It's just unbelievable ", + }, + then: { + "* ", + "But at the end of the day I reckon ", + }, + and: { + "* ", + "Too right ", + }, + but: { + "* ", + "Yeah nah ", + }, + }, + map[string]messages.StepKeywordType{ + "Y'know ": messages.StepKeywordType_CONTEXT, + + "It's just unbelievable ": messages.StepKeywordType_ACTION, + + "But at the end of the day I reckon ": messages.StepKeywordType_OUTCOME, + + "Too right ": 
messages.StepKeywordType_CONJUNCTION, + + "Yeah nah ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-lol": &Dialect{ + "en-lol", "LOLCAT", "LOLCAT", map[string][]string{ + feature: { + "OH HAI", + }, + rule: { + "Rule", + }, + background: { + "B4", + }, + scenario: { + "MISHUN", + }, + scenarioOutline: { + "MISHUN SRSLY", + }, + examples: { + "EXAMPLZ", + }, + given: { + "* ", + "I CAN HAZ ", + }, + when: { + "* ", + "WEN ", + }, + then: { + "* ", + "DEN ", + }, + and: { + "* ", + "AN ", + }, + but: { + "* ", + "BUT ", + }, + }, + map[string]messages.StepKeywordType{ + "I CAN HAZ ": messages.StepKeywordType_CONTEXT, + + "WEN ": messages.StepKeywordType_ACTION, + + "DEN ": messages.StepKeywordType_OUTCOME, + + "AN ": messages.StepKeywordType_CONJUNCTION, + + "BUT ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-old": &Dialect{ + "en-old", "Old English", "Englisc", map[string][]string{ + feature: { + "Hwaet", + "Hwæt", + }, + rule: { + "Rule", + }, + background: { + "Aer", + "Ær", + }, + scenario: { + "Swa", + }, + scenarioOutline: { + "Swa hwaer swa", + "Swa hwær swa", + }, + examples: { + "Se the", + "Se þe", + "Se ðe", + }, + given: { + "* ", + "Thurh ", + "Þurh ", + "Ðurh ", + }, + when: { + "* ", + "Bæþsealf ", + "Bæþsealfa ", + "Bæþsealfe ", + "Ciricæw ", + "Ciricæwe ", + "Ciricæwa ", + }, + then: { + "* ", + "Tha ", + "Þa ", + "Ða ", + "Tha the ", + "Þa þe ", + "Ða ðe ", + }, + and: { + "* ", + "Ond ", + "7 ", + }, + but: { + "* ", + "Ac ", + }, + }, + map[string]messages.StepKeywordType{ + "Thurh ": messages.StepKeywordType_CONTEXT, + + "Þurh ": messages.StepKeywordType_CONTEXT, + + "Ðurh ": messages.StepKeywordType_CONTEXT, + + "Bæþsealf ": messages.StepKeywordType_ACTION, + + "Bæþsealfa ": messages.StepKeywordType_ACTION, + + "Bæþsealfe ": messages.StepKeywordType_ACTION, + + "Ciricæw ": messages.StepKeywordType_ACTION, + + "Ciricæwe ": messages.StepKeywordType_ACTION, + + "Ciricæwa ": messages.StepKeywordType_ACTION, + + "Tha ": messages.StepKeywordType_OUTCOME, + + "Þa ": messages.StepKeywordType_OUTCOME, + + "Ða ": messages.StepKeywordType_OUTCOME, + + "Tha the ": messages.StepKeywordType_OUTCOME, + + "Þa þe ": messages.StepKeywordType_OUTCOME, + + "Ða ðe ": messages.StepKeywordType_OUTCOME, + + "Ond ": messages.StepKeywordType_CONJUNCTION, + + "7 ": messages.StepKeywordType_CONJUNCTION, + + "Ac ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-pirate": &Dialect{ + "en-pirate", "Pirate", "Pirate", map[string][]string{ + feature: { + "Ahoy matey!", + }, + rule: { + "Rule", + }, + background: { + "Yo-ho-ho", + }, + scenario: { + "Heave to", + }, + scenarioOutline: { + "Shiver me timbers", + }, + examples: { + "Dead men tell no tales", + }, + given: { + "* ", + "Gangway! ", + }, + when: { + "* ", + "Blimey! ", + }, + then: { + "* ", + "Let go and haul ", + }, + and: { + "* ", + "Aye ", + }, + but: { + "* ", + "Avast! ", + }, + }, + map[string]messages.StepKeywordType{ + "Gangway! ": messages.StepKeywordType_CONTEXT, + + "Blimey! ": messages.StepKeywordType_ACTION, + + "Let go and haul ": messages.StepKeywordType_OUTCOME, + + "Aye ": messages.StepKeywordType_CONJUNCTION, + + "Avast! 
": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-tx": &Dialect{ + "en-tx", "Texas", "Texas", map[string][]string{ + feature: { + "This ain’t my first rodeo", + "All gussied up", + }, + rule: { + "Rule ", + }, + background: { + "Lemme tell y'all a story", + }, + scenario: { + "All hat and no cattle", + }, + scenarioOutline: { + "Serious as a snake bite", + "Busy as a hound in flea season", + }, + examples: { + "Now that's a story longer than a cattle drive in July", + }, + given: { + "Fixin' to ", + "All git out ", + }, + when: { + "Quick out of the chute ", + }, + then: { + "There’s no tree but bears some fruit ", + }, + and: { + "Come hell or high water ", + }, + but: { + "Well now hold on, I'll you what ", + }, + }, + map[string]messages.StepKeywordType{ + "Fixin' to ": messages.StepKeywordType_CONTEXT, + + "All git out ": messages.StepKeywordType_CONTEXT, + + "Quick out of the chute ": messages.StepKeywordType_ACTION, + + "There’s no tree but bears some fruit ": messages.StepKeywordType_OUTCOME, + + "Come hell or high water ": messages.StepKeywordType_CONJUNCTION, + + "Well now hold on, I'll you what ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "eo": &Dialect{ + "eo", "Esperanto", "Esperanto", map[string][]string{ + feature: { + "Trajto", + }, + rule: { + "Rule", + }, + background: { + "Fono", + }, + scenario: { + "Ekzemplo", + "Scenaro", + "Kazo", + }, + scenarioOutline: { + "Konturo de la scenaro", + "Skizo", + "Kazo-skizo", + }, + examples: { + "Ekzemploj", + }, + given: { + "* ", + "Donitaĵo ", + "Komence ", + }, + when: { + "* ", + "Se ", + }, + then: { + "* ", + "Do ", + }, + and: { + "* ", + "Kaj ", + }, + but: { + "* ", + "Sed ", + }, + }, + map[string]messages.StepKeywordType{ + "Donitaĵo ": messages.StepKeywordType_CONTEXT, + + "Komence ": messages.StepKeywordType_CONTEXT, + + "Se ": messages.StepKeywordType_ACTION, + + "Do ": messages.StepKeywordType_OUTCOME, + + "Kaj ": messages.StepKeywordType_CONJUNCTION, + + "Sed ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "es": &Dialect{ + "es", "Spanish", "español", map[string][]string{ + feature: { + "Característica", + "Necesidad del negocio", + "Requisito", + }, + rule: { + "Regla", + "Regla de negocio", + }, + background: { + "Antecedentes", + }, + scenario: { + "Ejemplo", + "Escenario", + }, + scenarioOutline: { + "Esquema del escenario", + }, + examples: { + "Ejemplos", + }, + given: { + "* ", + "Dado ", + "Dada ", + "Dados ", + "Dadas ", + }, + when: { + "* ", + "Cuando ", + }, + then: { + "* ", + "Entonces ", + }, + and: { + "* ", + "Y ", + "E ", + }, + but: { + "* ", + "Pero ", + }, + }, + map[string]messages.StepKeywordType{ + "Dado ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Dados ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Cuando ": messages.StepKeywordType_ACTION, + + "Entonces ": messages.StepKeywordType_OUTCOME, + + "Y ": messages.StepKeywordType_CONJUNCTION, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Pero ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "et": &Dialect{ + "et", "Estonian", "eesti keel", map[string][]string{ + feature: { + "Omadus", + }, + rule: { + "Reegel", + }, + background: { + "Taust", + }, + scenario: { + "Juhtum", + "Stsenaarium", + }, + scenarioOutline: { + "Raamjuhtum", + "Raamstsenaarium", + }, + examples: { + 
"Juhtumid", + }, + given: { + "* ", + "Eeldades ", + }, + when: { + "* ", + "Kui ", + }, + then: { + "* ", + "Siis ", + }, + and: { + "* ", + "Ja ", + }, + but: { + "* ", + "Kuid ", + }, + }, + map[string]messages.StepKeywordType{ + "Eeldades ": messages.StepKeywordType_CONTEXT, + + "Kui ": messages.StepKeywordType_ACTION, + + "Siis ": messages.StepKeywordType_OUTCOME, + + "Ja ": messages.StepKeywordType_CONJUNCTION, + + "Kuid ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "fa": &Dialect{ + "fa", "Persian", "فارسی", map[string][]string{ + feature: { + "وِیژگی", + }, + rule: { + "Rule", + }, + background: { + "زمینه", + }, + scenario: { + "مثال", + "سناریو", + }, + scenarioOutline: { + "الگوی سناریو", + }, + examples: { + "نمونه ها", + }, + given: { + "* ", + "با فرض ", + }, + when: { + "* ", + "هنگامی ", + }, + then: { + "* ", + "آنگاه ", + }, + and: { + "* ", + "و ", + }, + but: { + "* ", + "اما ", + }, + }, + map[string]messages.StepKeywordType{ + "با فرض ": messages.StepKeywordType_CONTEXT, + + "هنگامی ": messages.StepKeywordType_ACTION, + + "آنگاه ": messages.StepKeywordType_OUTCOME, + + "و ": messages.StepKeywordType_CONJUNCTION, + + "اما ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "fi": &Dialect{ + "fi", "Finnish", "suomi", map[string][]string{ + feature: { + "Ominaisuus", + }, + rule: { + "Rule", + }, + background: { + "Tausta", + }, + scenario: { + "Tapaus", + }, + scenarioOutline: { + "Tapausaihio", + }, + examples: { + "Tapaukset", + }, + given: { + "* ", + "Oletetaan ", + }, + when: { + "* ", + "Kun ", + }, + then: { + "* ", + "Niin ", + }, + and: { + "* ", + "Ja ", + }, + but: { + "* ", + "Mutta ", + }, + }, + map[string]messages.StepKeywordType{ + "Oletetaan ": messages.StepKeywordType_CONTEXT, + + "Kun ": messages.StepKeywordType_ACTION, + + "Niin ": messages.StepKeywordType_OUTCOME, + + "Ja ": messages.StepKeywordType_CONJUNCTION, + + "Mutta ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "fr": &Dialect{ + "fr", "French", "français", map[string][]string{ + feature: { + "Fonctionnalité", + }, + rule: { + "Règle", + }, + background: { + "Contexte", + }, + scenario: { + "Exemple", + "Scénario", + }, + scenarioOutline: { + "Plan du scénario", + "Plan du Scénario", + }, + examples: { + "Exemples", + }, + given: { + "* ", + "Soit ", + "Sachant que ", + "Sachant qu'", + "Sachant ", + "Etant donné que ", + "Etant donné qu'", + "Etant donné ", + "Etant donnée ", + "Etant donnés ", + "Etant données ", + "Étant donné que ", + "Étant donné qu'", + "Étant donné ", + "Étant donnée ", + "Étant donnés ", + "Étant données ", + }, + when: { + "* ", + "Quand ", + "Lorsque ", + "Lorsqu'", + }, + then: { + "* ", + "Alors ", + "Donc ", + }, + and: { + "* ", + "Et que ", + "Et qu'", + "Et ", + }, + but: { + "* ", + "Mais que ", + "Mais qu'", + "Mais ", + }, + }, + map[string]messages.StepKeywordType{ + "Soit ": messages.StepKeywordType_CONTEXT, + + "Sachant que ": messages.StepKeywordType_CONTEXT, + + "Sachant qu'": messages.StepKeywordType_CONTEXT, + + "Sachant ": messages.StepKeywordType_CONTEXT, + + "Etant donné que ": messages.StepKeywordType_CONTEXT, + + "Etant donné qu'": messages.StepKeywordType_CONTEXT, + + "Etant donné ": messages.StepKeywordType_CONTEXT, + + "Etant donnée ": messages.StepKeywordType_CONTEXT, + + "Etant donnés ": messages.StepKeywordType_CONTEXT, + + "Etant données ": messages.StepKeywordType_CONTEXT, + + "Étant donné que ": 
messages.StepKeywordType_CONTEXT, + + "Étant donné qu'": messages.StepKeywordType_CONTEXT, + + "Étant donné ": messages.StepKeywordType_CONTEXT, + + "Étant donnée ": messages.StepKeywordType_CONTEXT, + + "Étant donnés ": messages.StepKeywordType_CONTEXT, + + "Étant données ": messages.StepKeywordType_CONTEXT, + + "Quand ": messages.StepKeywordType_ACTION, + + "Lorsque ": messages.StepKeywordType_ACTION, + + "Lorsqu'": messages.StepKeywordType_ACTION, + + "Alors ": messages.StepKeywordType_OUTCOME, + + "Donc ": messages.StepKeywordType_OUTCOME, + + "Et que ": messages.StepKeywordType_CONJUNCTION, + + "Et qu'": messages.StepKeywordType_CONJUNCTION, + + "Et ": messages.StepKeywordType_CONJUNCTION, + + "Mais que ": messages.StepKeywordType_CONJUNCTION, + + "Mais qu'": messages.StepKeywordType_CONJUNCTION, + + "Mais ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ga": &Dialect{ + "ga", "Irish", "Gaeilge", map[string][]string{ + feature: { + "Gné", + }, + rule: { + "Rule", + }, + background: { + "Cúlra", + }, + scenario: { + "Sampla", + "Cás", + }, + scenarioOutline: { + "Cás Achomair", + }, + examples: { + "Samplaí", + }, + given: { + "* ", + "Cuir i gcás go", + "Cuir i gcás nach", + "Cuir i gcás gur", + "Cuir i gcás nár", + }, + when: { + "* ", + "Nuair a", + "Nuair nach", + "Nuair ba", + "Nuair nár", + }, + then: { + "* ", + "Ansin", + }, + and: { + "* ", + "Agus", + }, + but: { + "* ", + "Ach", + }, + }, + map[string]messages.StepKeywordType{ + "Cuir i gcás go": messages.StepKeywordType_CONTEXT, + + "Cuir i gcás nach": messages.StepKeywordType_CONTEXT, + + "Cuir i gcás gur": messages.StepKeywordType_CONTEXT, + + "Cuir i gcás nár": messages.StepKeywordType_CONTEXT, + + "Nuair a": messages.StepKeywordType_ACTION, + + "Nuair nach": messages.StepKeywordType_ACTION, + + "Nuair ba": messages.StepKeywordType_ACTION, + + "Nuair nár": messages.StepKeywordType_ACTION, + + "Ansin": messages.StepKeywordType_OUTCOME, + + "Agus": messages.StepKeywordType_CONJUNCTION, + + "Ach": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "gj": &Dialect{ + "gj", "Gujarati", "ગુજરાતી", map[string][]string{ + feature: { + "લક્ષણ", + "વ્યાપાર જરૂર", + "ક્ષમતા", + }, + rule: { + "Rule", + }, + background: { + "બેકગ્રાઉન્ડ", + }, + scenario: { + "ઉદાહરણ", + "સ્થિતિ", + }, + scenarioOutline: { + "પરિદ્દશ્ય રૂપરેખા", + "પરિદ્દશ્ય ઢાંચો", + }, + examples: { + "ઉદાહરણો", + }, + given: { + "* ", + "આપેલ છે ", + }, + when: { + "* ", + "ક્યારે ", + }, + then: { + "* ", + "પછી ", + }, + and: { + "* ", + "અને ", + }, + but: { + "* ", + "પણ ", + }, + }, + map[string]messages.StepKeywordType{ + "આપેલ છે ": messages.StepKeywordType_CONTEXT, + + "ક્યારે ": messages.StepKeywordType_ACTION, + + "પછી ": messages.StepKeywordType_OUTCOME, + + "અને ": messages.StepKeywordType_CONJUNCTION, + + "પણ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "gl": &Dialect{ + "gl", "Galician", "galego", map[string][]string{ + feature: { + "Característica", + }, + rule: { + "Rule", + }, + background: { + "Contexto", + }, + scenario: { + "Exemplo", + "Escenario", + }, + scenarioOutline: { + "Esbozo do escenario", + }, + examples: { + "Exemplos", + }, + given: { + "* ", + "Dado ", + "Dada ", + "Dados ", + "Dadas ", + }, + when: { + "* ", + "Cando ", + }, + then: { + "* ", + "Entón ", + "Logo ", + }, + and: { + "* ", + "E ", + }, + but: { + "* ", + "Mais ", + "Pero ", + }, + }, + map[string]messages.StepKeywordType{ + "Dado ": 
messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Dados ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Cando ": messages.StepKeywordType_ACTION, + + "Entón ": messages.StepKeywordType_OUTCOME, + + "Logo ": messages.StepKeywordType_OUTCOME, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Mais ": messages.StepKeywordType_CONJUNCTION, + + "Pero ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "he": &Dialect{ + "he", "Hebrew", "עברית", map[string][]string{ + feature: { + "תכונה", + }, + rule: { + "כלל", + }, + background: { + "רקע", + }, + scenario: { + "דוגמא", + "תרחיש", + }, + scenarioOutline: { + "תבנית תרחיש", + }, + examples: { + "דוגמאות", + }, + given: { + "* ", + "בהינתן ", + }, + when: { + "* ", + "כאשר ", + }, + then: { + "* ", + "אז ", + "אזי ", + }, + and: { + "* ", + "וגם ", + }, + but: { + "* ", + "אבל ", + }, + }, + map[string]messages.StepKeywordType{ + "בהינתן ": messages.StepKeywordType_CONTEXT, + + "כאשר ": messages.StepKeywordType_ACTION, + + "אז ": messages.StepKeywordType_OUTCOME, + + "אזי ": messages.StepKeywordType_OUTCOME, + + "וגם ": messages.StepKeywordType_CONJUNCTION, + + "אבל ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "hi": &Dialect{ + "hi", "Hindi", "हिंदी", map[string][]string{ + feature: { + "रूप लेख", + }, + rule: { + "नियम", + }, + background: { + "पृष्ठभूमि", + }, + scenario: { + "परिदृश्य", + }, + scenarioOutline: { + "परिदृश्य रूपरेखा", + }, + examples: { + "उदाहरण", + }, + given: { + "* ", + "अगर ", + "यदि ", + "चूंकि ", + }, + when: { + "* ", + "जब ", + "कदा ", + }, + then: { + "* ", + "तब ", + "तदा ", + }, + and: { + "* ", + "और ", + "तथा ", + }, + but: { + "* ", + "पर ", + "परन्तु ", + "किन्तु ", + }, + }, + map[string]messages.StepKeywordType{ + "अगर ": messages.StepKeywordType_CONTEXT, + + "यदि ": messages.StepKeywordType_CONTEXT, + + "चूंकि ": messages.StepKeywordType_CONTEXT, + + "जब ": messages.StepKeywordType_ACTION, + + "कदा ": messages.StepKeywordType_ACTION, + + "तब ": messages.StepKeywordType_OUTCOME, + + "तदा ": messages.StepKeywordType_OUTCOME, + + "और ": messages.StepKeywordType_CONJUNCTION, + + "तथा ": messages.StepKeywordType_CONJUNCTION, + + "पर ": messages.StepKeywordType_CONJUNCTION, + + "परन्तु ": messages.StepKeywordType_CONJUNCTION, + + "किन्तु ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "hr": &Dialect{ + "hr", "Croatian", "hrvatski", map[string][]string{ + feature: { + "Osobina", + "Mogućnost", + "Mogucnost", + }, + rule: { + "Rule", + }, + background: { + "Pozadina", + }, + scenario: { + "Primjer", + "Scenarij", + }, + scenarioOutline: { + "Skica", + "Koncept", + }, + examples: { + "Primjeri", + "Scenariji", + }, + given: { + "* ", + "Zadan ", + "Zadani ", + "Zadano ", + "Ukoliko ", + }, + when: { + "* ", + "Kada ", + "Kad ", + }, + then: { + "* ", + "Onda ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "Ali ", + }, + }, + map[string]messages.StepKeywordType{ + "Zadan ": messages.StepKeywordType_CONTEXT, + + "Zadani ": messages.StepKeywordType_CONTEXT, + + "Zadano ": messages.StepKeywordType_CONTEXT, + + "Ukoliko ": messages.StepKeywordType_CONTEXT, + + "Kada ": messages.StepKeywordType_ACTION, + + "Kad ": messages.StepKeywordType_ACTION, + + "Onda ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ali ": messages.StepKeywordType_CONJUNCTION, + + "* ": 
messages.StepKeywordType_UNKNOWN, + }}, + "ht": &Dialect{ + "ht", "Creole", "kreyòl", map[string][]string{ + feature: { + "Karakteristik", + "Mak", + "Fonksyonalite", + }, + rule: { + "Rule", + }, + background: { + "Kontèks", + "Istorik", + }, + scenario: { + "Senaryo", + }, + scenarioOutline: { + "Plan senaryo", + "Plan Senaryo", + "Senaryo deskripsyon", + "Senaryo Deskripsyon", + "Dyagram senaryo", + "Dyagram Senaryo", + }, + examples: { + "Egzanp", + }, + given: { + "* ", + "Sipoze ", + "Sipoze ke ", + "Sipoze Ke ", + }, + when: { + "* ", + "Lè ", + "Le ", + }, + then: { + "* ", + "Lè sa a ", + "Le sa a ", + }, + and: { + "* ", + "Ak ", + "Epi ", + "E ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Sipoze ": messages.StepKeywordType_CONTEXT, + + "Sipoze ke ": messages.StepKeywordType_CONTEXT, + + "Sipoze Ke ": messages.StepKeywordType_CONTEXT, + + "Lè ": messages.StepKeywordType_ACTION, + + "Le ": messages.StepKeywordType_ACTION, + + "Lè sa a ": messages.StepKeywordType_OUTCOME, + + "Le sa a ": messages.StepKeywordType_OUTCOME, + + "Ak ": messages.StepKeywordType_CONJUNCTION, + + "Epi ": messages.StepKeywordType_CONJUNCTION, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "hu": &Dialect{ + "hu", "Hungarian", "magyar", map[string][]string{ + feature: { + "Jellemző", + }, + rule: { + "Szabály", + }, + background: { + "Háttér", + }, + scenario: { + "Példa", + "Forgatókönyv", + }, + scenarioOutline: { + "Forgatókönyv vázlat", + }, + examples: { + "Példák", + }, + given: { + "* ", + "Amennyiben ", + "Adott ", + }, + when: { + "* ", + "Majd ", + "Ha ", + "Amikor ", + }, + then: { + "* ", + "Akkor ", + }, + and: { + "* ", + "És ", + }, + but: { + "* ", + "De ", + }, + }, + map[string]messages.StepKeywordType{ + "Amennyiben ": messages.StepKeywordType_CONTEXT, + + "Adott ": messages.StepKeywordType_CONTEXT, + + "Majd ": messages.StepKeywordType_ACTION, + + "Ha ": messages.StepKeywordType_ACTION, + + "Amikor ": messages.StepKeywordType_ACTION, + + "Akkor ": messages.StepKeywordType_OUTCOME, + + "És ": messages.StepKeywordType_CONJUNCTION, + + "De ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "id": &Dialect{ + "id", "Indonesian", "Bahasa Indonesia", map[string][]string{ + feature: { + "Fitur", + }, + rule: { + "Rule", + "Aturan", + }, + background: { + "Dasar", + "Latar Belakang", + }, + scenario: { + "Skenario", + }, + scenarioOutline: { + "Skenario konsep", + "Garis-Besar Skenario", + }, + examples: { + "Contoh", + "Misal", + }, + given: { + "* ", + "Dengan ", + "Diketahui ", + "Diasumsikan ", + "Bila ", + "Jika ", + }, + when: { + "* ", + "Ketika ", + }, + then: { + "* ", + "Maka ", + "Kemudian ", + }, + and: { + "* ", + "Dan ", + }, + but: { + "* ", + "Tapi ", + "Tetapi ", + }, + }, + map[string]messages.StepKeywordType{ + "Dengan ": messages.StepKeywordType_CONTEXT, + + "Diketahui ": messages.StepKeywordType_CONTEXT, + + "Diasumsikan ": messages.StepKeywordType_CONTEXT, + + "Bila ": messages.StepKeywordType_CONTEXT, + + "Jika ": messages.StepKeywordType_CONTEXT, + + "Ketika ": messages.StepKeywordType_ACTION, + + "Maka ": messages.StepKeywordType_OUTCOME, + + "Kemudian ": messages.StepKeywordType_OUTCOME, + + "Dan ": messages.StepKeywordType_CONJUNCTION, + + "Tapi ": messages.StepKeywordType_CONJUNCTION, + + "Tetapi ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + 
"is": &Dialect{ + "is", "Icelandic", "Íslenska", map[string][]string{ + feature: { + "Eiginleiki", + }, + rule: { + "Rule", + }, + background: { + "Bakgrunnur", + }, + scenario: { + "Atburðarás", + }, + scenarioOutline: { + "Lýsing Atburðarásar", + "Lýsing Dæma", + }, + examples: { + "Dæmi", + "Atburðarásir", + }, + given: { + "* ", + "Ef ", + }, + when: { + "* ", + "Þegar ", + }, + then: { + "* ", + "Þá ", + }, + and: { + "* ", + "Og ", + }, + but: { + "* ", + "En ", + }, + }, + map[string]messages.StepKeywordType{ + "Ef ": messages.StepKeywordType_CONTEXT, + + "Þegar ": messages.StepKeywordType_ACTION, + + "Þá ": messages.StepKeywordType_OUTCOME, + + "Og ": messages.StepKeywordType_CONJUNCTION, + + "En ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "it": &Dialect{ + "it", "Italian", "italiano", map[string][]string{ + feature: { + "Funzionalità", + "Esigenza di Business", + "Abilità", + }, + rule: { + "Regola", + }, + background: { + "Contesto", + }, + scenario: { + "Esempio", + "Scenario", + }, + scenarioOutline: { + "Schema dello scenario", + }, + examples: { + "Esempi", + }, + given: { + "* ", + "Dato ", + "Data ", + "Dati ", + "Date ", + }, + when: { + "* ", + "Quando ", + }, + then: { + "* ", + "Allora ", + }, + and: { + "* ", + "E ", + }, + but: { + "* ", + "Ma ", + }, + }, + map[string]messages.StepKeywordType{ + "Dato ": messages.StepKeywordType_CONTEXT, + + "Data ": messages.StepKeywordType_CONTEXT, + + "Dati ": messages.StepKeywordType_CONTEXT, + + "Date ": messages.StepKeywordType_CONTEXT, + + "Quando ": messages.StepKeywordType_ACTION, + + "Allora ": messages.StepKeywordType_OUTCOME, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Ma ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ja": &Dialect{ + "ja", "Japanese", "日本語", map[string][]string{ + feature: { + "フィーチャ", + "機能", + }, + rule: { + "ルール", + }, + background: { + "背景", + }, + scenario: { + "シナリオ", + }, + scenarioOutline: { + "シナリオアウトライン", + "シナリオテンプレート", + "テンプレ", + "シナリオテンプレ", + }, + examples: { + "例", + "サンプル", + }, + given: { + "* ", + "前提", + }, + when: { + "* ", + "もし", + }, + then: { + "* ", + "ならば", + }, + and: { + "* ", + "且つ", + "かつ", + }, + but: { + "* ", + "然し", + "しかし", + "但し", + "ただし", + }, + }, + map[string]messages.StepKeywordType{ + "前提": messages.StepKeywordType_CONTEXT, + + "もし": messages.StepKeywordType_ACTION, + + "ならば": messages.StepKeywordType_OUTCOME, + + "且つ": messages.StepKeywordType_CONJUNCTION, + + "かつ": messages.StepKeywordType_CONJUNCTION, + + "然し": messages.StepKeywordType_CONJUNCTION, + + "しかし": messages.StepKeywordType_CONJUNCTION, + + "但し": messages.StepKeywordType_CONJUNCTION, + + "ただし": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "jv": &Dialect{ + "jv", "Javanese", "Basa Jawa", map[string][]string{ + feature: { + "Fitur", + }, + rule: { + "Rule", + }, + background: { + "Dasar", + }, + scenario: { + "Skenario", + }, + scenarioOutline: { + "Konsep skenario", + }, + examples: { + "Conto", + "Contone", + }, + given: { + "* ", + "Nalika ", + "Nalikaning ", + }, + when: { + "* ", + "Manawa ", + "Menawa ", + }, + then: { + "* ", + "Njuk ", + "Banjur ", + }, + and: { + "* ", + "Lan ", + }, + but: { + "* ", + "Tapi ", + "Nanging ", + "Ananging ", + }, + }, + map[string]messages.StepKeywordType{ + "Nalika ": messages.StepKeywordType_CONTEXT, + + "Nalikaning ": messages.StepKeywordType_CONTEXT, + + "Manawa ": messages.StepKeywordType_ACTION, + + "Menawa ": 
messages.StepKeywordType_ACTION, + + "Njuk ": messages.StepKeywordType_OUTCOME, + + "Banjur ": messages.StepKeywordType_OUTCOME, + + "Lan ": messages.StepKeywordType_CONJUNCTION, + + "Tapi ": messages.StepKeywordType_CONJUNCTION, + + "Nanging ": messages.StepKeywordType_CONJUNCTION, + + "Ananging ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ka": &Dialect{ + "ka", "Georgian", "ქართული", map[string][]string{ + feature: { + "თვისება", + "მოთხოვნა", + }, + rule: { + "წესი", + }, + background: { + "კონტექსტი", + }, + scenario: { + "მაგალითად", + "მაგალითი", + "მაგ", + "სცენარი", + }, + scenarioOutline: { + "სცენარის ნიმუში", + "სცენარის შაბლონი", + "ნიმუში", + "შაბლონი", + }, + examples: { + "მაგალითები", + }, + given: { + "* ", + "მოცემული ", + "მოცემულია ", + "ვთქვათ ", + }, + when: { + "* ", + "როდესაც ", + "როცა ", + "როგორც კი ", + "თუ ", + }, + then: { + "* ", + "მაშინ ", + }, + and: { + "* ", + "და ", + "ასევე ", + }, + but: { + "* ", + "მაგრამ ", + "თუმცა ", + }, + }, + map[string]messages.StepKeywordType{ + "მოცემული ": messages.StepKeywordType_CONTEXT, + + "მოცემულია ": messages.StepKeywordType_CONTEXT, + + "ვთქვათ ": messages.StepKeywordType_CONTEXT, + + "როდესაც ": messages.StepKeywordType_ACTION, + + "როცა ": messages.StepKeywordType_ACTION, + + "როგორც კი ": messages.StepKeywordType_ACTION, + + "თუ ": messages.StepKeywordType_ACTION, + + "მაშინ ": messages.StepKeywordType_OUTCOME, + + "და ": messages.StepKeywordType_CONJUNCTION, + + "ასევე ": messages.StepKeywordType_CONJUNCTION, + + "მაგრამ ": messages.StepKeywordType_CONJUNCTION, + + "თუმცა ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "kn": &Dialect{ + "kn", "Kannada", "ಕನ್ನಡ", map[string][]string{ + feature: { + "ಹೆಚ್ಚಳ", + }, + rule: { + "Rule", + }, + background: { + "ಹಿನ್ನೆಲೆ", + }, + scenario: { + "ಉದಾಹರಣೆ", + "ಕಥಾಸಾರಾಂಶ", + }, + scenarioOutline: { + "ವಿವರಣೆ", + }, + examples: { + "ಉದಾಹರಣೆಗಳು", + }, + given: { + "* ", + "ನೀಡಿದ ", + }, + when: { + "* ", + "ಸ್ಥಿತಿಯನ್ನು ", + }, + then: { + "* ", + "ನಂತರ ", + }, + and: { + "* ", + "ಮತ್ತು ", + }, + but: { + "* ", + "ಆದರೆ ", + }, + }, + map[string]messages.StepKeywordType{ + "ನೀಡಿದ ": messages.StepKeywordType_CONTEXT, + + "ಸ್ಥಿತಿಯನ್ನು ": messages.StepKeywordType_ACTION, + + "ನಂತರ ": messages.StepKeywordType_OUTCOME, + + "ಮತ್ತು ": messages.StepKeywordType_CONJUNCTION, + + "ಆದರೆ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ko": &Dialect{ + "ko", "Korean", "한국어", map[string][]string{ + feature: { + "기능", + }, + rule: { + "Rule", + }, + background: { + "배경", + }, + scenario: { + "시나리오", + }, + scenarioOutline: { + "시나리오 개요", + }, + examples: { + "예", + }, + given: { + "* ", + "조건", + "먼저", + }, + when: { + "* ", + "만일", + "만약", + }, + then: { + "* ", + "그러면", + }, + and: { + "* ", + "그리고", + }, + but: { + "* ", + "하지만", + "단", + }, + }, + map[string]messages.StepKeywordType{ + "조건": messages.StepKeywordType_CONTEXT, + + "먼저": messages.StepKeywordType_CONTEXT, + + "만일": messages.StepKeywordType_ACTION, + + "만약": messages.StepKeywordType_ACTION, + + "그러면": messages.StepKeywordType_OUTCOME, + + "그리고": messages.StepKeywordType_CONJUNCTION, + + "하지만": messages.StepKeywordType_CONJUNCTION, + + "단": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "lt": &Dialect{ + "lt", "Lithuanian", "lietuvių kalba", map[string][]string{ + feature: { + "Savybė", + }, + rule: { + "Rule", + }, + background: { + 
"Kontekstas", + }, + scenario: { + "Pavyzdys", + "Scenarijus", + }, + scenarioOutline: { + "Scenarijaus šablonas", + }, + examples: { + "Pavyzdžiai", + "Scenarijai", + "Variantai", + }, + given: { + "* ", + "Duota ", + }, + when: { + "* ", + "Kai ", + }, + then: { + "* ", + "Tada ", + }, + and: { + "* ", + "Ir ", + }, + but: { + "* ", + "Bet ", + }, + }, + map[string]messages.StepKeywordType{ + "Duota ": messages.StepKeywordType_CONTEXT, + + "Kai ": messages.StepKeywordType_ACTION, + + "Tada ": messages.StepKeywordType_OUTCOME, + + "Ir ": messages.StepKeywordType_CONJUNCTION, + + "Bet ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "lu": &Dialect{ + "lu", "Luxemburgish", "Lëtzebuergesch", map[string][]string{ + feature: { + "Funktionalitéit", + }, + rule: { + "Rule", + }, + background: { + "Hannergrond", + }, + scenario: { + "Beispill", + "Szenario", + }, + scenarioOutline: { + "Plang vum Szenario", + }, + examples: { + "Beispiller", + }, + given: { + "* ", + "ugeholl ", + }, + when: { + "* ", + "wann ", + }, + then: { + "* ", + "dann ", + }, + and: { + "* ", + "an ", + "a ", + }, + but: { + "* ", + "awer ", + "mä ", + }, + }, + map[string]messages.StepKeywordType{ + "ugeholl ": messages.StepKeywordType_CONTEXT, + + "wann ": messages.StepKeywordType_ACTION, + + "dann ": messages.StepKeywordType_OUTCOME, + + "an ": messages.StepKeywordType_CONJUNCTION, + + "a ": messages.StepKeywordType_CONJUNCTION, + + "awer ": messages.StepKeywordType_CONJUNCTION, + + "mä ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "lv": &Dialect{ + "lv", "Latvian", "latviešu", map[string][]string{ + feature: { + "Funkcionalitāte", + "Fīča", + }, + rule: { + "Rule", + }, + background: { + "Konteksts", + "Situācija", + }, + scenario: { + "Piemērs", + "Scenārijs", + }, + scenarioOutline: { + "Scenārijs pēc parauga", + }, + examples: { + "Piemēri", + "Paraugs", + }, + given: { + "* ", + "Kad ", + }, + when: { + "* ", + "Ja ", + }, + then: { + "* ", + "Tad ", + }, + and: { + "* ", + "Un ", + }, + but: { + "* ", + "Bet ", + }, + }, + map[string]messages.StepKeywordType{ + "Kad ": messages.StepKeywordType_CONTEXT, + + "Ja ": messages.StepKeywordType_ACTION, + + "Tad ": messages.StepKeywordType_OUTCOME, + + "Un ": messages.StepKeywordType_CONJUNCTION, + + "Bet ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mk-Cyrl": &Dialect{ + "mk-Cyrl", "Macedonian", "Македонски", map[string][]string{ + feature: { + "Функционалност", + "Бизнис потреба", + "Можност", + }, + rule: { + "Rule", + }, + background: { + "Контекст", + "Содржина", + }, + scenario: { + "Пример", + "Сценарио", + "На пример", + }, + scenarioOutline: { + "Преглед на сценарија", + "Скица", + "Концепт", + }, + examples: { + "Примери", + "Сценарија", + }, + given: { + "* ", + "Дадено ", + "Дадена ", + }, + when: { + "* ", + "Кога ", + }, + then: { + "* ", + "Тогаш ", + }, + and: { + "* ", + "И ", + }, + but: { + "* ", + "Но ", + }, + }, + map[string]messages.StepKeywordType{ + "Дадено ": messages.StepKeywordType_CONTEXT, + + "Дадена ": messages.StepKeywordType_CONTEXT, + + "Кога ": messages.StepKeywordType_ACTION, + + "Тогаш ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "Но ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mk-Latn": &Dialect{ + "mk-Latn", "Macedonian (Latin)", "Makedonski (Latinica)", map[string][]string{ + feature: { + 
"Funkcionalnost", + "Biznis potreba", + "Mozhnost", + }, + rule: { + "Rule", + }, + background: { + "Kontekst", + "Sodrzhina", + }, + scenario: { + "Scenario", + "Na primer", + }, + scenarioOutline: { + "Pregled na scenarija", + "Skica", + "Koncept", + }, + examples: { + "Primeri", + "Scenaria", + }, + given: { + "* ", + "Dadeno ", + "Dadena ", + }, + when: { + "* ", + "Koga ", + }, + then: { + "* ", + "Togash ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "No ", + }, + }, + map[string]messages.StepKeywordType{ + "Dadeno ": messages.StepKeywordType_CONTEXT, + + "Dadena ": messages.StepKeywordType_CONTEXT, + + "Koga ": messages.StepKeywordType_ACTION, + + "Togash ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "No ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mn": &Dialect{ + "mn", "Mongolian", "монгол", map[string][]string{ + feature: { + "Функц", + "Функционал", + }, + rule: { + "Rule", + }, + background: { + "Агуулга", + }, + scenario: { + "Сценар", + }, + scenarioOutline: { + "Сценарын төлөвлөгөө", + }, + examples: { + "Тухайлбал", + }, + given: { + "* ", + "Өгөгдсөн нь ", + "Анх ", + }, + when: { + "* ", + "Хэрэв ", + }, + then: { + "* ", + "Тэгэхэд ", + "Үүний дараа ", + }, + and: { + "* ", + "Мөн ", + "Тэгээд ", + }, + but: { + "* ", + "Гэхдээ ", + "Харин ", + }, + }, + map[string]messages.StepKeywordType{ + "Өгөгдсөн нь ": messages.StepKeywordType_CONTEXT, + + "Анх ": messages.StepKeywordType_CONTEXT, + + "Хэрэв ": messages.StepKeywordType_ACTION, + + "Тэгэхэд ": messages.StepKeywordType_OUTCOME, + + "Үүний дараа ": messages.StepKeywordType_OUTCOME, + + "Мөн ": messages.StepKeywordType_CONJUNCTION, + + "Тэгээд ": messages.StepKeywordType_CONJUNCTION, + + "Гэхдээ ": messages.StepKeywordType_CONJUNCTION, + + "Харин ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ne": &Dialect{ + "ne", "Nepali", "नेपाली", map[string][]string{ + feature: { + "सुविधा", + "विशेषता", + }, + rule: { + "नियम", + }, + background: { + "पृष्ठभूमी", + }, + scenario: { + "परिदृश्य", + }, + scenarioOutline: { + "परिदृश्य रूपरेखा", + }, + examples: { + "उदाहरण", + "उदाहरणहरु", + }, + given: { + "* ", + "दिइएको ", + "दिएको ", + "यदि ", + }, + when: { + "* ", + "जब ", + }, + then: { + "* ", + "त्यसपछि ", + "अनी ", + }, + and: { + "* ", + "र ", + "अनि ", + }, + but: { + "* ", + "तर ", + }, + }, + map[string]messages.StepKeywordType{ + "दिइएको ": messages.StepKeywordType_CONTEXT, + + "दिएको ": messages.StepKeywordType_CONTEXT, + + "यदि ": messages.StepKeywordType_CONTEXT, + + "जब ": messages.StepKeywordType_ACTION, + + "त्यसपछि ": messages.StepKeywordType_OUTCOME, + + "अनी ": messages.StepKeywordType_OUTCOME, + + "र ": messages.StepKeywordType_CONJUNCTION, + + "अनि ": messages.StepKeywordType_CONJUNCTION, + + "तर ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "nl": &Dialect{ + "nl", "Dutch", "Nederlands", map[string][]string{ + feature: { + "Functionaliteit", + }, + rule: { + "Rule", + }, + background: { + "Achtergrond", + }, + scenario: { + "Voorbeeld", + "Scenario", + }, + scenarioOutline: { + "Abstract Scenario", + }, + examples: { + "Voorbeelden", + }, + given: { + "* ", + "Gegeven ", + "Stel ", + }, + when: { + "* ", + "Als ", + "Wanneer ", + }, + then: { + "* ", + "Dan ", + }, + and: { + "* ", + "En ", + }, + but: { + "* ", + "Maar ", + }, + }, + map[string]messages.StepKeywordType{ + "Gegeven ": 
messages.StepKeywordType_CONTEXT, + + "Stel ": messages.StepKeywordType_CONTEXT, + + "Als ": messages.StepKeywordType_ACTION, + + "Wanneer ": messages.StepKeywordType_ACTION, + + "Dan ": messages.StepKeywordType_OUTCOME, + + "En ": messages.StepKeywordType_CONJUNCTION, + + "Maar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "no": &Dialect{ + "no", "Norwegian", "norsk", map[string][]string{ + feature: { + "Egenskap", + }, + rule: { + "Regel", + }, + background: { + "Bakgrunn", + }, + scenario: { + "Eksempel", + "Scenario", + }, + scenarioOutline: { + "Scenariomal", + "Abstrakt Scenario", + }, + examples: { + "Eksempler", + }, + given: { + "* ", + "Gitt ", + }, + when: { + "* ", + "Når ", + }, + then: { + "* ", + "Så ", + }, + and: { + "* ", + "Og ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Gitt ": messages.StepKeywordType_CONTEXT, + + "Når ": messages.StepKeywordType_ACTION, + + "Så ": messages.StepKeywordType_OUTCOME, + + "Og ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "pa": &Dialect{ + "pa", "Panjabi", "ਪੰਜਾਬੀ", map[string][]string{ + feature: { + "ਖਾਸੀਅਤ", + "ਮੁਹਾਂਦਰਾ", + "ਨਕਸ਼ ਨੁਹਾਰ", + }, + rule: { + "Rule", + }, + background: { + "ਪਿਛੋਕੜ", + }, + scenario: { + "ਉਦਾਹਰਨ", + "ਪਟਕਥਾ", + }, + scenarioOutline: { + "ਪਟਕਥਾ ਢਾਂਚਾ", + "ਪਟਕਥਾ ਰੂਪ ਰੇਖਾ", + }, + examples: { + "ਉਦਾਹਰਨਾਂ", + }, + given: { + "* ", + "ਜੇਕਰ ", + "ਜਿਵੇਂ ਕਿ ", + }, + when: { + "* ", + "ਜਦੋਂ ", + }, + then: { + "* ", + "ਤਦ ", + }, + and: { + "* ", + "ਅਤੇ ", + }, + but: { + "* ", + "ਪਰ ", + }, + }, + map[string]messages.StepKeywordType{ + "ਜੇਕਰ ": messages.StepKeywordType_CONTEXT, + + "ਜਿਵੇਂ ਕਿ ": messages.StepKeywordType_CONTEXT, + + "ਜਦੋਂ ": messages.StepKeywordType_ACTION, + + "ਤਦ ": messages.StepKeywordType_OUTCOME, + + "ਅਤੇ ": messages.StepKeywordType_CONJUNCTION, + + "ਪਰ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "pl": &Dialect{ + "pl", "Polish", "polski", map[string][]string{ + feature: { + "Właściwość", + "Funkcja", + "Aspekt", + "Potrzeba biznesowa", + }, + rule: { + "Zasada", + "Reguła", + }, + background: { + "Założenia", + }, + scenario: { + "Przykład", + "Scenariusz", + }, + scenarioOutline: { + "Szablon scenariusza", + }, + examples: { + "Przykłady", + }, + given: { + "* ", + "Zakładając ", + "Mając ", + "Zakładając, że ", + }, + when: { + "* ", + "Jeżeli ", + "Jeśli ", + "Gdy ", + "Kiedy ", + }, + then: { + "* ", + "Wtedy ", + }, + and: { + "* ", + "Oraz ", + "I ", + }, + but: { + "* ", + "Ale ", + }, + }, + map[string]messages.StepKeywordType{ + "Zakładając ": messages.StepKeywordType_CONTEXT, + + "Mając ": messages.StepKeywordType_CONTEXT, + + "Zakładając, że ": messages.StepKeywordType_CONTEXT, + + "Jeżeli ": messages.StepKeywordType_ACTION, + + "Jeśli ": messages.StepKeywordType_ACTION, + + "Gdy ": messages.StepKeywordType_ACTION, + + "Kiedy ": messages.StepKeywordType_ACTION, + + "Wtedy ": messages.StepKeywordType_OUTCOME, + + "Oraz ": messages.StepKeywordType_CONJUNCTION, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ale ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "pt": &Dialect{ + "pt", "Portuguese", "português", map[string][]string{ + feature: { + "Funcionalidade", + "Característica", + "Caracteristica", + }, + rule: { + "Regra", + }, + background: { + "Contexto", + "Cenário de Fundo", + "Cenario de Fundo", + 
"Fundo", + }, + scenario: { + "Exemplo", + "Cenário", + "Cenario", + }, + scenarioOutline: { + "Esquema do Cenário", + "Esquema do Cenario", + "Delineação do Cenário", + "Delineacao do Cenario", + }, + examples: { + "Exemplos", + "Cenários", + "Cenarios", + }, + given: { + "* ", + "Dado ", + "Dada ", + "Dados ", + "Dadas ", + }, + when: { + "* ", + "Quando ", + }, + then: { + "* ", + "Então ", + "Entao ", + }, + and: { + "* ", + "E ", + }, + but: { + "* ", + "Mas ", + }, + }, + map[string]messages.StepKeywordType{ + "Dado ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Dados ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Quando ": messages.StepKeywordType_ACTION, + + "Então ": messages.StepKeywordType_OUTCOME, + + "Entao ": messages.StepKeywordType_OUTCOME, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Mas ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ro": &Dialect{ + "ro", "Romanian", "română", map[string][]string{ + feature: { + "Functionalitate", + "Funcționalitate", + "Funcţionalitate", + }, + rule: { + "Rule", + }, + background: { + "Context", + }, + scenario: { + "Exemplu", + "Scenariu", + }, + scenarioOutline: { + "Structura scenariu", + "Structură scenariu", + }, + examples: { + "Exemple", + }, + given: { + "* ", + "Date fiind ", + "Dat fiind ", + "Dată fiind", + "Dati fiind ", + "Dați fiind ", + "Daţi fiind ", + }, + when: { + "* ", + "Cand ", + "Când ", + }, + then: { + "* ", + "Atunci ", + }, + and: { + "* ", + "Si ", + "Și ", + "Şi ", + }, + but: { + "* ", + "Dar ", + }, + }, + map[string]messages.StepKeywordType{ + "Date fiind ": messages.StepKeywordType_CONTEXT, + + "Dat fiind ": messages.StepKeywordType_CONTEXT, + + "Dată fiind": messages.StepKeywordType_CONTEXT, + + "Dati fiind ": messages.StepKeywordType_CONTEXT, + + "Dați fiind ": messages.StepKeywordType_CONTEXT, + + "Daţi fiind ": messages.StepKeywordType_CONTEXT, + + "Cand ": messages.StepKeywordType_ACTION, + + "Când ": messages.StepKeywordType_ACTION, + + "Atunci ": messages.StepKeywordType_OUTCOME, + + "Si ": messages.StepKeywordType_CONJUNCTION, + + "Și ": messages.StepKeywordType_CONJUNCTION, + + "Şi ": messages.StepKeywordType_CONJUNCTION, + + "Dar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ru": &Dialect{ + "ru", "Russian", "русский", map[string][]string{ + feature: { + "Функция", + "Функциональность", + "Функционал", + "Свойство", + "Фича", + }, + rule: { + "Правило", + }, + background: { + "Предыстория", + "Контекст", + }, + scenario: { + "Пример", + "Сценарий", + }, + scenarioOutline: { + "Структура сценария", + "Шаблон сценария", + }, + examples: { + "Примеры", + }, + given: { + "* ", + "Допустим ", + "Дано ", + "Пусть ", + }, + when: { + "* ", + "Когда ", + "Если ", + }, + then: { + "* ", + "То ", + "Затем ", + "Тогда ", + }, + and: { + "* ", + "И ", + "К тому же ", + "Также ", + }, + but: { + "* ", + "Но ", + "А ", + "Иначе ", + }, + }, + map[string]messages.StepKeywordType{ + "Допустим ": messages.StepKeywordType_CONTEXT, + + "Дано ": messages.StepKeywordType_CONTEXT, + + "Пусть ": messages.StepKeywordType_CONTEXT, + + "Когда ": messages.StepKeywordType_ACTION, + + "Если ": messages.StepKeywordType_ACTION, + + "То ": messages.StepKeywordType_OUTCOME, + + "Затем ": messages.StepKeywordType_OUTCOME, + + "Тогда ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "К тому же ": 
messages.StepKeywordType_CONJUNCTION, + + "Также ": messages.StepKeywordType_CONJUNCTION, + + "Но ": messages.StepKeywordType_CONJUNCTION, + + "А ": messages.StepKeywordType_CONJUNCTION, + + "Иначе ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sk": &Dialect{ + "sk", "Slovak", "Slovensky", map[string][]string{ + feature: { + "Požiadavka", + "Funkcia", + "Vlastnosť", + }, + rule: { + "Rule", + }, + background: { + "Pozadie", + }, + scenario: { + "Príklad", + "Scenár", + }, + scenarioOutline: { + "Náčrt Scenáru", + "Náčrt Scenára", + "Osnova Scenára", + }, + examples: { + "Príklady", + }, + given: { + "* ", + "Pokiaľ ", + "Za predpokladu ", + }, + when: { + "* ", + "Keď ", + "Ak ", + }, + then: { + "* ", + "Tak ", + "Potom ", + }, + and: { + "* ", + "A ", + "A tiež ", + "A taktiež ", + "A zároveň ", + }, + but: { + "* ", + "Ale ", + }, + }, + map[string]messages.StepKeywordType{ + "Pokiaľ ": messages.StepKeywordType_CONTEXT, + + "Za predpokladu ": messages.StepKeywordType_CONTEXT, + + "Keď ": messages.StepKeywordType_ACTION, + + "Ak ": messages.StepKeywordType_ACTION, + + "Tak ": messages.StepKeywordType_OUTCOME, + + "Potom ": messages.StepKeywordType_OUTCOME, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "A tiež ": messages.StepKeywordType_CONJUNCTION, + + "A taktiež ": messages.StepKeywordType_CONJUNCTION, + + "A zároveň ": messages.StepKeywordType_CONJUNCTION, + + "Ale ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sl": &Dialect{ + "sl", "Slovenian", "Slovenski", map[string][]string{ + feature: { + "Funkcionalnost", + "Funkcija", + "Možnosti", + "Moznosti", + "Lastnost", + "Značilnost", + }, + rule: { + "Rule", + }, + background: { + "Kontekst", + "Osnova", + "Ozadje", + }, + scenario: { + "Primer", + "Scenarij", + }, + scenarioOutline: { + "Struktura scenarija", + "Skica", + "Koncept", + "Oris scenarija", + "Osnutek", + }, + examples: { + "Primeri", + "Scenariji", + }, + given: { + "Dano ", + "Podano ", + "Zaradi ", + "Privzeto ", + }, + when: { + "Ko ", + "Ce ", + "Če ", + "Kadar ", + }, + then: { + "Nato ", + "Potem ", + "Takrat ", + }, + and: { + "In ", + "Ter ", + }, + but: { + "Toda ", + "Ampak ", + "Vendar ", + }, + }, + map[string]messages.StepKeywordType{ + "Dano ": messages.StepKeywordType_CONTEXT, + + "Podano ": messages.StepKeywordType_CONTEXT, + + "Zaradi ": messages.StepKeywordType_CONTEXT, + + "Privzeto ": messages.StepKeywordType_CONTEXT, + + "Ko ": messages.StepKeywordType_ACTION, + + "Ce ": messages.StepKeywordType_ACTION, + + "Če ": messages.StepKeywordType_ACTION, + + "Kadar ": messages.StepKeywordType_ACTION, + + "Nato ": messages.StepKeywordType_OUTCOME, + + "Potem ": messages.StepKeywordType_OUTCOME, + + "Takrat ": messages.StepKeywordType_OUTCOME, + + "In ": messages.StepKeywordType_CONJUNCTION, + + "Ter ": messages.StepKeywordType_CONJUNCTION, + + "Toda ": messages.StepKeywordType_CONJUNCTION, + + "Ampak ": messages.StepKeywordType_CONJUNCTION, + + "Vendar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sr-Cyrl": &Dialect{ + "sr-Cyrl", "Serbian", "Српски", map[string][]string{ + feature: { + "Функционалност", + "Могућност", + "Особина", + }, + rule: { + "Правило", + }, + background: { + "Контекст", + "Основа", + "Позадина", + }, + scenario: { + "Пример", + "Сценарио", + "Пример", + }, + scenarioOutline: { + "Структура сценарија", + "Скица", + "Концепт", + }, + examples: { + "Примери", + "Сценарији", + }, + given: { + "* 
", + "За дато ", + "За дате ", + "За дати ", + }, + when: { + "* ", + "Када ", + "Кад ", + }, + then: { + "* ", + "Онда ", + }, + and: { + "* ", + "И ", + }, + but: { + "* ", + "Али ", + }, + }, + map[string]messages.StepKeywordType{ + "За дато ": messages.StepKeywordType_CONTEXT, + + "За дате ": messages.StepKeywordType_CONTEXT, + + "За дати ": messages.StepKeywordType_CONTEXT, + + "Када ": messages.StepKeywordType_ACTION, + + "Кад ": messages.StepKeywordType_ACTION, + + "Онда ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "Али ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sr-Latn": &Dialect{ + "sr-Latn", "Serbian (Latin)", "Srpski (Latinica)", map[string][]string{ + feature: { + "Funkcionalnost", + "Mogućnost", + "Mogucnost", + "Osobina", + }, + rule: { + "Pravilo", + }, + background: { + "Kontekst", + "Osnova", + "Pozadina", + }, + scenario: { + "Scenario", + "Primer", + }, + scenarioOutline: { + "Struktura scenarija", + "Skica", + "Koncept", + }, + examples: { + "Primeri", + "Scenariji", + }, + given: { + "* ", + "Za dato ", + "Za date ", + "Za dati ", + }, + when: { + "* ", + "Kada ", + "Kad ", + }, + then: { + "* ", + "Onda ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "Ali ", + }, + }, + map[string]messages.StepKeywordType{ + "Za dato ": messages.StepKeywordType_CONTEXT, + + "Za date ": messages.StepKeywordType_CONTEXT, + + "Za dati ": messages.StepKeywordType_CONTEXT, + + "Kada ": messages.StepKeywordType_ACTION, + + "Kad ": messages.StepKeywordType_ACTION, + + "Onda ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ali ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sv": &Dialect{ + "sv", "Swedish", "Svenska", map[string][]string{ + feature: { + "Egenskap", + }, + rule: { + "Regel", + }, + background: { + "Bakgrund", + }, + scenario: { + "Scenario", + }, + scenarioOutline: { + "Abstrakt Scenario", + "Scenariomall", + }, + examples: { + "Exempel", + }, + given: { + "* ", + "Givet ", + }, + when: { + "* ", + "När ", + }, + then: { + "* ", + "Så ", + }, + and: { + "* ", + "Och ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Givet ": messages.StepKeywordType_CONTEXT, + + "När ": messages.StepKeywordType_ACTION, + + "Så ": messages.StepKeywordType_OUTCOME, + + "Och ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ta": &Dialect{ + "ta", "Tamil", "தமிழ்", map[string][]string{ + feature: { + "அம்சம்", + "வணிக தேவை", + "திறன்", + }, + rule: { + "Rule", + }, + background: { + "பின்னணி", + }, + scenario: { + "உதாரணமாக", + "காட்சி", + }, + scenarioOutline: { + "காட்சி சுருக்கம்", + "காட்சி வார்ப்புரு", + }, + examples: { + "எடுத்துக்காட்டுகள்", + "காட்சிகள்", + "நிலைமைகளில்", + }, + given: { + "* ", + "கொடுக்கப்பட்ட ", + }, + when: { + "* ", + "எப்போது ", + }, + then: { + "* ", + "அப்பொழுது ", + }, + and: { + "* ", + "மேலும் ", + "மற்றும் ", + }, + but: { + "* ", + "ஆனால் ", + }, + }, + map[string]messages.StepKeywordType{ + "கொடுக்கப்பட்ட ": messages.StepKeywordType_CONTEXT, + + "எப்போது ": messages.StepKeywordType_ACTION, + + "அப்பொழுது ": messages.StepKeywordType_OUTCOME, + + "மேலும் ": messages.StepKeywordType_CONJUNCTION, + + "மற்றும் ": messages.StepKeywordType_CONJUNCTION, + + "ஆனால் ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + 
}}, + "th": &Dialect{ + "th", "Thai", "ไทย", map[string][]string{ + feature: { + "โครงหลัก", + "ความต้องการทางธุรกิจ", + "ความสามารถ", + }, + rule: { + "Rule", + }, + background: { + "แนวคิด", + }, + scenario: { + "เหตุการณ์", + }, + scenarioOutline: { + "สรุปเหตุการณ์", + "โครงสร้างของเหตุการณ์", + }, + examples: { + "ชุดของตัวอย่าง", + "ชุดของเหตุการณ์", + }, + given: { + "* ", + "กำหนดให้ ", + }, + when: { + "* ", + "เมื่อ ", + }, + then: { + "* ", + "ดังนั้น ", + }, + and: { + "* ", + "และ ", + }, + but: { + "* ", + "แต่ ", + }, + }, + map[string]messages.StepKeywordType{ + "กำหนดให้ ": messages.StepKeywordType_CONTEXT, + + "เมื่อ ": messages.StepKeywordType_ACTION, + + "ดังนั้น ": messages.StepKeywordType_OUTCOME, + + "และ ": messages.StepKeywordType_CONJUNCTION, + + "แต่ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "te": &Dialect{ + "te", "Telugu", "తెలుగు", map[string][]string{ + feature: { + "గుణము", + }, + rule: { + "Rule", + }, + background: { + "నేపథ్యం", + }, + scenario: { + "ఉదాహరణ", + "సన్నివేశం", + }, + scenarioOutline: { + "కథనం", + }, + examples: { + "ఉదాహరణలు", + }, + given: { + "* ", + "చెప్పబడినది ", + }, + when: { + "* ", + "ఈ పరిస్థితిలో ", + }, + then: { + "* ", + "అప్పుడు ", + }, + and: { + "* ", + "మరియు ", + }, + but: { + "* ", + "కాని ", + }, + }, + map[string]messages.StepKeywordType{ + "చెప్పబడినది ": messages.StepKeywordType_CONTEXT, + + "ఈ పరిస్థితిలో ": messages.StepKeywordType_ACTION, + + "అప్పుడు ": messages.StepKeywordType_OUTCOME, + + "మరియు ": messages.StepKeywordType_CONJUNCTION, + + "కాని ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "tlh": &Dialect{ + "tlh", "Klingon", "tlhIngan", map[string][]string{ + feature: { + "Qap", + "Qu'meH 'ut", + "perbogh", + "poQbogh malja'", + "laH", + }, + rule: { + "Rule", + }, + background: { + "mo'", + }, + scenario: { + "lut", + }, + scenarioOutline: { + "lut chovnatlh", + }, + examples: { + "ghantoH", + "lutmey", + }, + given: { + "* ", + "ghu' noblu' ", + "DaH ghu' bejlu' ", + }, + when: { + "* ", + "qaSDI' ", + }, + then: { + "* ", + "vaj ", + }, + and: { + "* ", + "'ej ", + "latlh ", + }, + but: { + "* ", + "'ach ", + "'a ", + }, + }, + map[string]messages.StepKeywordType{ + "ghu' noblu' ": messages.StepKeywordType_CONTEXT, + + "DaH ghu' bejlu' ": messages.StepKeywordType_CONTEXT, + + "qaSDI' ": messages.StepKeywordType_ACTION, + + "vaj ": messages.StepKeywordType_OUTCOME, + + "'ej ": messages.StepKeywordType_CONJUNCTION, + + "latlh ": messages.StepKeywordType_CONJUNCTION, + + "'ach ": messages.StepKeywordType_CONJUNCTION, + + "'a ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "tr": &Dialect{ + "tr", "Turkish", "Türkçe", map[string][]string{ + feature: { + "Özellik", + }, + rule: { + "Kural", + }, + background: { + "Geçmiş", + }, + scenario: { + "Örnek", + "Senaryo", + }, + scenarioOutline: { + "Senaryo taslağı", + }, + examples: { + "Örnekler", + }, + given: { + "* ", + "Diyelim ki ", + }, + when: { + "* ", + "Eğer ki ", + }, + then: { + "* ", + "O zaman ", + }, + and: { + "* ", + "Ve ", + }, + but: { + "* ", + "Fakat ", + "Ama ", + }, + }, + map[string]messages.StepKeywordType{ + "Diyelim ki ": messages.StepKeywordType_CONTEXT, + + "Eğer ki ": messages.StepKeywordType_ACTION, + + "O zaman ": messages.StepKeywordType_OUTCOME, + + "Ve ": messages.StepKeywordType_CONJUNCTION, + + "Fakat ": messages.StepKeywordType_CONJUNCTION, + + "Ama ": 
messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "tt": &Dialect{ + "tt", "Tatar", "Татарча", map[string][]string{ + feature: { + "Мөмкинлек", + "Үзенчәлеклелек", + }, + rule: { + "Rule", + }, + background: { + "Кереш", + }, + scenario: { + "Сценарий", + }, + scenarioOutline: { + "Сценарийның төзелеше", + }, + examples: { + "Үрнәкләр", + "Мисаллар", + }, + given: { + "* ", + "Әйтик ", + }, + when: { + "* ", + "Әгәр ", + }, + then: { + "* ", + "Нәтиҗәдә ", + }, + and: { + "* ", + "Һәм ", + "Вә ", + }, + but: { + "* ", + "Ләкин ", + "Әмма ", + }, + }, + map[string]messages.StepKeywordType{ + "Әйтик ": messages.StepKeywordType_CONTEXT, + + "Әгәр ": messages.StepKeywordType_ACTION, + + "Нәтиҗәдә ": messages.StepKeywordType_OUTCOME, + + "Һәм ": messages.StepKeywordType_CONJUNCTION, + + "Вә ": messages.StepKeywordType_CONJUNCTION, + + "Ләкин ": messages.StepKeywordType_CONJUNCTION, + + "Әмма ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "uk": &Dialect{ + "uk", "Ukrainian", "Українська", map[string][]string{ + feature: { + "Функціонал", + }, + rule: { + "Rule", + }, + background: { + "Передумова", + }, + scenario: { + "Приклад", + "Сценарій", + }, + scenarioOutline: { + "Структура сценарію", + }, + examples: { + "Приклади", + }, + given: { + "* ", + "Припустимо ", + "Припустимо, що ", + "Нехай ", + "Дано ", + }, + when: { + "* ", + "Якщо ", + "Коли ", + }, + then: { + "* ", + "То ", + "Тоді ", + }, + and: { + "* ", + "І ", + "А також ", + "Та ", + }, + but: { + "* ", + "Але ", + }, + }, + map[string]messages.StepKeywordType{ + "Припустимо ": messages.StepKeywordType_CONTEXT, + + "Припустимо, що ": messages.StepKeywordType_CONTEXT, + + "Нехай ": messages.StepKeywordType_CONTEXT, + + "Дано ": messages.StepKeywordType_CONTEXT, + + "Якщо ": messages.StepKeywordType_ACTION, + + "Коли ": messages.StepKeywordType_ACTION, + + "То ": messages.StepKeywordType_OUTCOME, + + "Тоді ": messages.StepKeywordType_OUTCOME, + + "І ": messages.StepKeywordType_CONJUNCTION, + + "А також ": messages.StepKeywordType_CONJUNCTION, + + "Та ": messages.StepKeywordType_CONJUNCTION, + + "Але ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ur": &Dialect{ + "ur", "Urdu", "اردو", map[string][]string{ + feature: { + "صلاحیت", + "کاروبار کی ضرورت", + "خصوصیت", + }, + rule: { + "Rule", + }, + background: { + "پس منظر", + }, + scenario: { + "منظرنامہ", + }, + scenarioOutline: { + "منظر نامے کا خاکہ", + }, + examples: { + "مثالیں", + }, + given: { + "* ", + "اگر ", + "بالفرض ", + "فرض کیا ", + }, + when: { + "* ", + "جب ", + }, + then: { + "* ", + "پھر ", + "تب ", + }, + and: { + "* ", + "اور ", + }, + but: { + "* ", + "لیکن ", + }, + }, + map[string]messages.StepKeywordType{ + "اگر ": messages.StepKeywordType_CONTEXT, + + "بالفرض ": messages.StepKeywordType_CONTEXT, + + "فرض کیا ": messages.StepKeywordType_CONTEXT, + + "جب ": messages.StepKeywordType_ACTION, + + "پھر ": messages.StepKeywordType_OUTCOME, + + "تب ": messages.StepKeywordType_OUTCOME, + + "اور ": messages.StepKeywordType_CONJUNCTION, + + "لیکن ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "uz": &Dialect{ + "uz", "Uzbek", "Узбекча", map[string][]string{ + feature: { + "Функционал", + }, + rule: { + "Rule", + }, + background: { + "Тарих", + }, + scenario: { + "Сценарий", + }, + scenarioOutline: { + "Сценарий структураси", + }, + examples: { + "Мисоллар", + }, + given: { + "* ", + "Belgilangan ", 
+ }, + when: { + "* ", + "Агар ", + }, + then: { + "* ", + "Унда ", + }, + and: { + "* ", + "Ва ", + }, + but: { + "* ", + "Лекин ", + "Бирок ", + "Аммо ", + }, + }, + map[string]messages.StepKeywordType{ + "Belgilangan ": messages.StepKeywordType_CONTEXT, + + "Агар ": messages.StepKeywordType_ACTION, + + "Унда ": messages.StepKeywordType_OUTCOME, + + "Ва ": messages.StepKeywordType_CONJUNCTION, + + "Лекин ": messages.StepKeywordType_CONJUNCTION, + + "Бирок ": messages.StepKeywordType_CONJUNCTION, + + "Аммо ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "vi": &Dialect{ + "vi", "Vietnamese", "Tiếng Việt", map[string][]string{ + feature: { + "Tính năng", + }, + rule: { + "Rule", + }, + background: { + "Bối cảnh", + }, + scenario: { + "Tình huống", + "Kịch bản", + }, + scenarioOutline: { + "Khung tình huống", + "Khung kịch bản", + }, + examples: { + "Dữ liệu", + }, + given: { + "* ", + "Biết ", + "Cho ", + }, + when: { + "* ", + "Khi ", + }, + then: { + "* ", + "Thì ", + }, + and: { + "* ", + "Và ", + }, + but: { + "* ", + "Nhưng ", + }, + }, + map[string]messages.StepKeywordType{ + "Biết ": messages.StepKeywordType_CONTEXT, + + "Cho ": messages.StepKeywordType_CONTEXT, + + "Khi ": messages.StepKeywordType_ACTION, + + "Thì ": messages.StepKeywordType_OUTCOME, + + "Và ": messages.StepKeywordType_CONJUNCTION, + + "Nhưng ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "zh-CN": &Dialect{ + "zh-CN", "Chinese simplified", "简体中文", map[string][]string{ + feature: { + "功能", + }, + rule: { + "Rule", + "规则", + }, + background: { + "背景", + }, + scenario: { + "场景", + "剧本", + }, + scenarioOutline: { + "场景大纲", + "剧本大纲", + }, + examples: { + "例子", + }, + given: { + "* ", + "假如", + "假设", + "假定", + }, + when: { + "* ", + "当", + }, + then: { + "* ", + "那么", + }, + and: { + "* ", + "而且", + "并且", + "同时", + }, + but: { + "* ", + "但是", + }, + }, + map[string]messages.StepKeywordType{ + "假如": messages.StepKeywordType_CONTEXT, + + "假设": messages.StepKeywordType_CONTEXT, + + "假定": messages.StepKeywordType_CONTEXT, + + "当": messages.StepKeywordType_ACTION, + + "那么": messages.StepKeywordType_OUTCOME, + + "而且": messages.StepKeywordType_CONJUNCTION, + + "并且": messages.StepKeywordType_CONJUNCTION, + + "同时": messages.StepKeywordType_CONJUNCTION, + + "但是": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "zh-TW": &Dialect{ + "zh-TW", "Chinese traditional", "繁體中文", map[string][]string{ + feature: { + "功能", + }, + rule: { + "Rule", + }, + background: { + "背景", + }, + scenario: { + "場景", + "劇本", + }, + scenarioOutline: { + "場景大綱", + "劇本大綱", + }, + examples: { + "例子", + }, + given: { + "* ", + "假如", + "假設", + "假定", + }, + when: { + "* ", + "當", + }, + then: { + "* ", + "那麼", + }, + and: { + "* ", + "而且", + "並且", + "同時", + }, + but: { + "* ", + "但是", + }, + }, + map[string]messages.StepKeywordType{ + "假如": messages.StepKeywordType_CONTEXT, + + "假設": messages.StepKeywordType_CONTEXT, + + "假定": messages.StepKeywordType_CONTEXT, + + "當": messages.StepKeywordType_ACTION, + + "那麼": messages.StepKeywordType_OUTCOME, + + "而且": messages.StepKeywordType_CONJUNCTION, + + "並且": messages.StepKeywordType_CONJUNCTION, + + "同時": messages.StepKeywordType_CONJUNCTION, + + "但是": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mr": &Dialect{ + "mr", "Marathi", "मराठी", map[string][]string{ + feature: { + "वैशिष्ट्य", + "सुविधा", + }, + rule: { + "नियम", + }, + background: { + 
"पार्श्वभूमी", + }, + scenario: { + "परिदृश्य", + }, + scenarioOutline: { + "परिदृश्य रूपरेखा", + }, + examples: { + "उदाहरण", + }, + given: { + "* ", + "जर", + "दिलेल्या प्रमाणे ", + }, + when: { + "* ", + "जेव्हा ", + }, + then: { + "* ", + "मग ", + "तेव्हा ", + }, + and: { + "* ", + "आणि ", + "तसेच ", + }, + but: { + "* ", + "पण ", + "परंतु ", + }, + }, + map[string]messages.StepKeywordType{ + "जर": messages.StepKeywordType_CONTEXT, + + "दिलेल्या प्रमाणे ": messages.StepKeywordType_CONTEXT, + + "जेव्हा ": messages.StepKeywordType_ACTION, + + "मग ": messages.StepKeywordType_OUTCOME, + + "तेव्हा ": messages.StepKeywordType_OUTCOME, + + "आणि ": messages.StepKeywordType_CONJUNCTION, + + "तसेच ": messages.StepKeywordType_CONJUNCTION, + + "पण ": messages.StepKeywordType_CONJUNCTION, + + "परंतु ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "amh": &Dialect{ + "amh", "Amharic", "አማርኛ", map[string][]string{ + feature: { + "ስራ", + "የተፈለገው ስራ", + "የሚፈለገው ድርጊት", + }, + rule: { + "ህግ", + }, + background: { + "ቅድመ ሁኔታ", + "መነሻ", + "መነሻ ሀሳብ", + }, + scenario: { + "ምሳሌ", + "ሁናቴ", + }, + scenarioOutline: { + "ሁናቴ ዝርዝር", + "ሁናቴ አብነት", + }, + examples: { + "ምሳሌዎች", + "ሁናቴዎች", + }, + given: { + "* ", + "የተሰጠ ", + }, + when: { + "* ", + "መቼ ", + }, + then: { + "* ", + "ከዚያ ", + }, + and: { + "* ", + "እና ", + }, + but: { + "* ", + "ግን ", + }, + }, + map[string]messages.StepKeywordType{ + "የተሰጠ ": messages.StepKeywordType_CONTEXT, + + "መቼ ": messages.StepKeywordType_ACTION, + + "ከዚያ ": messages.StepKeywordType_OUTCOME, + + "እና ": messages.StepKeywordType_CONJUNCTION, + + "ግን ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq new file mode 100644 index 000000000..72ef6170f --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq @@ -0,0 +1,110 @@ +. as $root +| ( + [ to_entries[] + | [ + "\t",(.key|@json),": &Dialect{\n", + "\t\t", (.key|@json),", ", (.value.name|@json),", ", (.value.native|@json), ", map[string][]string{\n" + ] + ( + [ .value + | {"feature","rule","background","scenario","scenarioOutline","examples","given","when","then","and","but"} + | to_entries[] + | "\t\t\t"+(.key), ": {\n", + ([ .value[] | "\t\t\t\t", @json, ",\n" ]|add), + "\t\t\t},\n" + ] + ) + [ + "\t\t},\n", + "\t\tmap[string]messages.StepKeywordType{\n" + ] + ( + [ .value.given + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_CONTEXT", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.when + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_ACTION", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.then + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_OUTCOME", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.and + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_CONJUNCTION", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.but + | ( + [ .[] | select(. 
!= "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_CONJUNCTION", + ",\n\n" + ] | add + ), + "" + ] + + [ + "\t\t\t\"* \": messages.StepKeywordType_UNKNOWN,\n" + ] + ) + [ + "\t\t}", + "},\n" + ] + | add + ] + | add + ) +| "package gherkin\n\n" ++ "import messages \"github.com/cucumber/messages/go/v21\"\n\n" ++ "// Builtin dialects for " + ([ $root | to_entries[] | .key+" ("+.value.name+")" ] | join(", ")) + "\n" ++ "func DialectsBuiltin() DialectProvider {\n" ++ "\treturn builtinDialects\n" ++ "}\n\n" ++ "const (\n" ++ " feature = \"feature\"\n" ++ " rule = \"rule\"\n" ++ " background = \"background\"\n" ++ " scenario = \"scenario\"\n" ++ " scenarioOutline = \"scenarioOutline\"\n" ++ " examples = \"examples\"\n" ++ " given = \"given\"\n" ++ " when = \"when\"\n" ++ " then = \"then\"\n" ++ " and = \"and\"\n" ++ " but = \"but\"\n" ++ ")\n\n" ++ "var builtinDialects = gherkinDialectMap{\n" ++ . ++ "}" diff --git a/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go b/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go new file mode 100644 index 000000000..524d16e11 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go @@ -0,0 +1,143 @@ +package gherkin + +import ( + "bufio" + "fmt" + "github.com/cucumber/messages/go/v21" + "io" + "strings" +) + +type Parser interface { + StopAtFirstError(b bool) + Parse(s Scanner, m Matcher) (err error) +} + +/* +The Scanner reads a gherkin doc (typically read from a .feature file) and creates a token for +each line. The tokens are passed to the parser, which outputs an AST (Abstract Syntax Tree). + +If the scanner sees a # language header, it will reconfigure itself dynamically to look for +Gherkin keywords for the associated language. The keywords are defined in gherkin-languages.json. 
+*/ +type Scanner interface { + Scan() (line *Line, atEof bool, err error) +} + +type Builder interface { + Build(*Token) (bool, error) + StartRule(RuleType) (bool, error) + EndRule(RuleType) (bool, error) + Reset() +} + +type Token struct { + Type TokenType + Keyword string + KeywordType messages.StepKeywordType + Text string + Items []*LineSpan + GherkinDialect string + Indent string + Location *Location +} + +func (t *Token) IsEOF() bool { + return t.Type == TokenTypeEOF +} +func (t *Token) String() string { + return fmt.Sprintf("%v: %s/%s", t.Type, t.Keyword, t.Text) +} + +type LineSpan struct { + Column int + Text string +} + +func (l *LineSpan) String() string { + return fmt.Sprintf("%d:%s", l.Column, l.Text) +} + +type parser struct { + builder Builder + stopAtFirstError bool +} + +func NewParser(b Builder) Parser { + return &parser{ + builder: b, + } +} + +func (p *parser) StopAtFirstError(b bool) { + p.stopAtFirstError = b +} + +func NewScanner(r io.Reader) Scanner { + return &scanner{ + s: bufio.NewScanner(r), + line: 0, + } +} + +type scanner struct { + s *bufio.Scanner + line int +} + +func (t *scanner) Scan() (line *Line, atEof bool, err error) { + scanning := t.s.Scan() + if !scanning { + err = t.s.Err() + if err == nil { + atEof = true + } + } + if err == nil { + t.line += 1 + str := t.s.Text() + line = &Line{str, t.line, strings.TrimLeft(str, " \t"), atEof} + } + return +} + +type Line struct { + LineText string + LineNumber int + TrimmedLineText string + AtEof bool +} + +func (g *Line) Indent() int { + return len(g.LineText) - len(g.TrimmedLineText) +} + +func (g *Line) IsEmpty() bool { + return len(g.TrimmedLineText) == 0 +} + +func (g *Line) IsEof() bool { + return g.AtEof +} + +func (g *Line) StartsWith(prefix string) bool { + return strings.HasPrefix(g.TrimmedLineText, prefix) +} + +func ParseGherkinDocument(in io.Reader, newId func() string) (gherkinDocument *messages.GherkinDocument, err error) { + return ParseGherkinDocumentForLanguage(in, DefaultDialect, newId) +} + +func ParseGherkinDocumentForLanguage(in io.Reader, language string, newId func() string) (gherkinDocument *messages.GherkinDocument, err error) { + + builder := NewAstBuilder(newId) + parser := NewParser(builder) + parser.StopAtFirstError(false) + matcher := NewLanguageMatcher(DialectsBuiltin(), language) + + scanner := NewScanner(in) + + err = parser.Parse(scanner, matcher) + + return builder.GetGherkinDocument(), err +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/matcher.go b/vendor/github.com/cucumber/gherkin/go/v26/matcher.go new file mode 100644 index 000000000..fda4e6852 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/matcher.go @@ -0,0 +1,301 @@ +package gherkin + +import ( + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +const ( + DefaultDialect = "en" + CommentPrefix = "#" + TagPrefix = "@" + TitleKeywordSeparator = ":" + TableCellSeparator = '|' + EscapeChar = '\\' + EscapedNewline = 'n' + DocstringSeparator = "\"\"\"" + DocstringAlternativeSeparator = "```" +) + +type matcher struct { + gdp DialectProvider + defaultLang string + lang string + dialect *Dialect + activeDocStringSeparator string + indentToRemove int + languagePattern *regexp.Regexp +} + +func NewMatcher(gdp DialectProvider) Matcher { + return &matcher{ + gdp: gdp, + defaultLang: DefaultDialect, + lang: DefaultDialect, + dialect: gdp.GetDialect(DefaultDialect), + languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"), + } +} + +func NewLanguageMatcher(gdp 
DialectProvider, language string) Matcher { + return &matcher{ + gdp: gdp, + defaultLang: language, + lang: language, + dialect: gdp.GetDialect(language), + languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"), + } +} + +func (m *matcher) Reset() { + m.indentToRemove = 0 + m.activeDocStringSeparator = "" + if m.lang != "en" { + m.dialect = m.gdp.GetDialect(m.defaultLang) + m.lang = "en" + } +} + +func (m *matcher) newTokenAtLocation(line, index int) (token *Token) { + column := index + 1 + token = new(Token) + token.GherkinDialect = m.lang + token.Location = &Location{line, column} + return +} + +func (m *matcher) MatchEOF(line *Line) (ok bool, token *Token, err error) { + if line.IsEof() { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeEOF + } + return +} + +func (m *matcher) MatchEmpty(line *Line) (ok bool, token *Token, err error) { + if line.IsEmpty() { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeEmpty + } + return +} + +func (m *matcher) MatchComment(line *Line) (ok bool, token *Token, err error) { + if line.StartsWith(CommentPrefix) { + token, ok = m.newTokenAtLocation(line.LineNumber, 0), true + token.Type = TokenTypeComment + token.Text = line.LineText + } + return +} + +func (m *matcher) MatchTagLine(line *Line) (ok bool, token *Token, err error) { + if !line.StartsWith(TagPrefix) { + return + } + commentDelimiter := regexp.MustCompile(`\s+` + CommentPrefix) + uncommentedLine := commentDelimiter.Split(line.TrimmedLineText, 2)[0] + var tags []*LineSpan + var column = line.Indent() + 1 + + splits := strings.Split(uncommentedLine, TagPrefix) + for i := range splits { + txt := strings.TrimRightFunc(splits[i], func(r rune) bool { + return unicode.IsSpace(r) + }) + if len(txt) == 0 { + continue + } + if !regexp.MustCompile(`^\S+$`).MatchString(txt) { + location := &Location{line.LineNumber, column} + msg := "A tag may not contain whitespace" + err = &parseError{msg, location} + break + } + tags = append(tags, &LineSpan{column, TagPrefix + txt}) + column = column + utf8.RuneCountInString(splits[i]) + 1 + } + + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeTagLine + token.Items = tags + + return +} + +func (m *matcher) matchTitleLine(line *Line, tokenType TokenType, keywords []string) (ok bool, token *Token, err error) { + for i := range keywords { + keyword := keywords[i] + if line.StartsWith(keyword + TitleKeywordSeparator) { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = tokenType + token.Keyword = keyword + token.Text = strings.Trim(line.TrimmedLineText[len(keyword)+1:], " ") + return + } + } + return +} + +func (m *matcher) MatchFeatureLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeFeatureLine, m.dialect.FeatureKeywords()) +} +func (m *matcher) MatchRuleLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeRuleLine, m.dialect.RuleKeywords()) +} +func (m *matcher) MatchBackgroundLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeBackgroundLine, m.dialect.BackgroundKeywords()) +} +func (m *matcher) MatchScenarioLine(line *Line) (ok bool, token *Token, err error) { + ok, token, err = m.matchTitleLine(line, TokenTypeScenarioLine, m.dialect.ScenarioKeywords()) + if ok || (err != nil) { + return ok, token, err + } + ok, token, err = 
m.matchTitleLine(line, TokenTypeScenarioLine, m.dialect.ScenarioOutlineKeywords()) + return ok, token, err +} +func (m *matcher) MatchExamplesLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeExamplesLine, m.dialect.ExamplesKeywords()) +} +func (m *matcher) MatchStepLine(line *Line) (ok bool, token *Token, err error) { + keywords := m.dialect.StepKeywords() + for i := range keywords { + keyword := keywords[i] + if line.StartsWith(keyword) { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeStepLine + token.Keyword = keyword + token.KeywordType = m.dialect.StepKeywordType(keyword) + token.Text = strings.Trim(line.TrimmedLineText[len(keyword):], " ") + return + } + } + return +} + +func (m *matcher) MatchDocStringSeparator(line *Line) (ok bool, token *Token, err error) { + if m.activeDocStringSeparator != "" { + if line.StartsWith(m.activeDocStringSeparator) { + // close + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeDocStringSeparator + token.Keyword = m.activeDocStringSeparator + + m.indentToRemove = 0 + m.activeDocStringSeparator = "" + } + return + } + if line.StartsWith(DocstringSeparator) { + m.activeDocStringSeparator = DocstringSeparator + } else if line.StartsWith(DocstringAlternativeSeparator) { + m.activeDocStringSeparator = DocstringAlternativeSeparator + } + if m.activeDocStringSeparator != "" { + // open + mediaType := line.TrimmedLineText[len(m.activeDocStringSeparator):] + m.indentToRemove = line.Indent() + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeDocStringSeparator + token.Keyword = m.activeDocStringSeparator + token.Text = mediaType + } + return +} + +func isSpaceAndNotNewLine(r rune) bool { + return unicode.IsSpace(r) && r != '\n' +} + +func (m *matcher) MatchTableRow(line *Line) (ok bool, token *Token, err error) { + var firstChar, firstPos = utf8.DecodeRuneInString(line.TrimmedLineText) + if firstChar == TableCellSeparator { + var cells []*LineSpan + var cell []rune + var startCol = line.Indent() + 2 // column where the current cell started + // start after the first separator, it's not included in the cell + for i, w, col := firstPos, 0, startCol; i < len(line.TrimmedLineText); i += w { + var char rune + char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:]) + if char == TableCellSeparator { + // append current cell + txt := string(cell) + + txtTrimmedLeadingSpace := strings.TrimLeftFunc(txt, isSpaceAndNotNewLine) + ind := utf8.RuneCountInString(txt) - utf8.RuneCountInString(txtTrimmedLeadingSpace) + txtTrimmed := strings.TrimRightFunc(txtTrimmedLeadingSpace, isSpaceAndNotNewLine) + cells = append(cells, &LineSpan{startCol + ind, txtTrimmed}) + // start building next + cell = make([]rune, 0) + startCol = col + 1 + } else if char == EscapeChar { + // skip this character but count the column + i += w + col++ + char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:]) + if char == EscapedNewline { + cell = append(cell, '\n') + } else { + if char != TableCellSeparator && char != EscapeChar { + cell = append(cell, EscapeChar) + } + cell = append(cell, char) + } + } else { + cell = append(cell, char) + } + col++ + } + + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeTableRow + token.Items = cells + } + return +} + +func (m *matcher) MatchLanguage(line *Line) (ok bool, token *Token, err error) { + matches := 
m.languagePattern.FindStringSubmatch(line.TrimmedLineText) + if len(matches) > 0 { + lang := matches[1] + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeLanguage + token.Text = lang + + dialect := m.gdp.GetDialect(lang) + if dialect == nil { + err = &parseError{"Language not supported: " + lang, token.Location} + } else { + m.lang = lang + m.dialect = dialect + } + } + return +} + +func (m *matcher) MatchOther(line *Line) (ok bool, token *Token, err error) { + token, ok = m.newTokenAtLocation(line.LineNumber, 0), true + token.Type = TokenTypeOther + + element := line.LineText + txt := strings.TrimLeft(element, " ") + + if len(element)-len(txt) > m.indentToRemove { + token.Text = m.unescapeDocString(element[m.indentToRemove:]) + } else { + token.Text = m.unescapeDocString(txt) + } + return +} + +func (m *matcher) unescapeDocString(text string) string { + if m.activeDocStringSeparator == DocstringSeparator { + return strings.Replace(text, "\\\"\\\"\\\"", DocstringSeparator, -1) + } + if m.activeDocStringSeparator == DocstringAlternativeSeparator { + return strings.Replace(text, "\\`\\`\\`", DocstringAlternativeSeparator, -1) + } + return text +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/messages.go b/vendor/github.com/cucumber/gherkin/go/v26/messages.go new file mode 100644 index 000000000..a3b7c1b71 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/messages.go @@ -0,0 +1,120 @@ +package gherkin + +import ( + "encoding/json" + "fmt" + "github.com/cucumber/messages/go/v21" + "io" + "io/ioutil" + "strings" +) + +func Messages( + paths []string, + decoder *json.Decoder, + language string, + includeSource bool, + includeGherkinDocument bool, + includePickles bool, + encoder *json.Encoder, + newId func() string, +) ([]messages.Envelope, error) { + var result []messages.Envelope + var err error + + handleMessage := func(result []messages.Envelope, message *messages.Envelope) ([]messages.Envelope, error) { + if encoder != nil { + err = encoder.Encode(message) + return result, err + } else { + result = append(result, *message) + } + + return result, err + } + + processSource := func(source *messages.Source) error { + if includeSource { + result, err = handleMessage(result, &messages.Envelope{ + Source: source, + }) + } + doc, err := ParseGherkinDocumentForLanguage(strings.NewReader(source.Data), language, newId) + if errs, ok := err.(parseErrors); ok { + // expected parse errors + for _, err := range errs { + if pe, ok := err.(*parseError); ok { + result, err = handleMessage(result, pe.asMessage(source.Uri)) + } else { + return fmt.Errorf("parse feature file: %s, unexpected error: %+v\n", source.Uri, err) + } + } + return nil + } + + if includeGherkinDocument { + doc.Uri = source.Uri + result, err = handleMessage(result, &messages.Envelope{ + GherkinDocument: doc, + }) + } + + if includePickles { + for _, pickle := range Pickles(*doc, source.Uri, newId) { + result, err = handleMessage(result, &messages.Envelope{ + Pickle: pickle, + }) + } + } + return nil + } + + if len(paths) == 0 { + for { + envelope := &messages.Envelope{} + err := decoder.Decode(envelope) + //marshal, err := json.Marshal(envelope) + //fmt.Println(string(marshal)) + if err == io.EOF { + break + } + + if envelope.Source != nil { + err = processSource(envelope.Source) + if err != nil { + return result, err + } + } + } + } else { + for _, path := range paths { + in, err := ioutil.ReadFile(path) + if err != nil { + return result, fmt.Errorf("read feature file: 
%s - %+v", path, err) + } + source := &messages.Source{ + Uri: path, + Data: string(in), + MediaType: "text/x.cucumber.gherkin+plain", + } + processSource(source) + } + } + + return result, err +} + +func (a *parseError) asMessage(uri string) *messages.Envelope { + return &messages.Envelope{ + ParseError: &messages.ParseError{ + Message: a.Error(), + Source: &messages.SourceReference{ + Uri: uri, + Location: &messages.Location{ + Line: int64(a.loc.Line), + Column: int64(a.loc.Column), + }, + }, + }, + } +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/parser.go b/vendor/github.com/cucumber/gherkin/go/v26/parser.go new file mode 100644 index 000000000..570e4babe --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/parser.go @@ -0,0 +1,4654 @@ +// +// This file is generated. Do not edit! Edit parser.go.razor instead. + +package gherkin + +import ( + "fmt" + "strings" +) + +type TokenType int + +const ( + TokenTypeNone TokenType = iota + TokenTypeEOF + TokenTypeEmpty + TokenTypeComment + TokenTypeTagLine + TokenTypeFeatureLine + TokenTypeRuleLine + TokenTypeBackgroundLine + TokenTypeScenarioLine + TokenTypeExamplesLine + TokenTypeStepLine + TokenTypeDocStringSeparator + TokenTypeTableRow + TokenTypeLanguage + TokenTypeOther +) + +func tokenTypeForRule(rt RuleType) TokenType { + return TokenTypeNone +} + +func (t TokenType) Name() string { + switch t { + case TokenTypeEOF: + return "EOF" + case TokenTypeEmpty: + return "Empty" + case TokenTypeComment: + return "Comment" + case TokenTypeTagLine: + return "TagLine" + case TokenTypeFeatureLine: + return "FeatureLine" + case TokenTypeRuleLine: + return "RuleLine" + case TokenTypeBackgroundLine: + return "BackgroundLine" + case TokenTypeScenarioLine: + return "ScenarioLine" + case TokenTypeExamplesLine: + return "ExamplesLine" + case TokenTypeStepLine: + return "StepLine" + case TokenTypeDocStringSeparator: + return "DocStringSeparator" + case TokenTypeTableRow: + return "TableRow" + case TokenTypeLanguage: + return "Language" + case TokenTypeOther: + return "Other" + } + return "" +} + +func (t TokenType) RuleType() RuleType { + switch t { + case TokenTypeEOF: + return RuleTypeEOF + case TokenTypeEmpty: + return RuleTypeEmpty + case TokenTypeComment: + return RuleTypeComment + case TokenTypeTagLine: + return RuleTypeTagLine + case TokenTypeFeatureLine: + return RuleTypeFeatureLine + case TokenTypeRuleLine: + return RuleTypeRuleLine + case TokenTypeBackgroundLine: + return RuleTypeBackgroundLine + case TokenTypeScenarioLine: + return RuleTypeScenarioLine + case TokenTypeExamplesLine: + return RuleTypeExamplesLine + case TokenTypeStepLine: + return RuleTypeStepLine + case TokenTypeDocStringSeparator: + return RuleTypeDocStringSeparator + case TokenTypeTableRow: + return RuleTypeTableRow + case TokenTypeLanguage: + return RuleTypeLanguage + case TokenTypeOther: + return RuleTypeOther + } + return RuleTypeNone +} + +type RuleType int + +const ( + RuleTypeNone RuleType = iota + + RuleTypeEOF + RuleTypeEmpty + RuleTypeComment + RuleTypeTagLine + RuleTypeFeatureLine + RuleTypeRuleLine + RuleTypeBackgroundLine + RuleTypeScenarioLine + RuleTypeExamplesLine + RuleTypeStepLine + RuleTypeDocStringSeparator + RuleTypeTableRow + RuleTypeLanguage + RuleTypeOther + RuleTypeGherkinDocument + RuleTypeFeature + RuleTypeFeatureHeader + RuleTypeRule + RuleTypeRuleHeader + RuleTypeBackground + RuleTypeScenarioDefinition + RuleTypeScenario + RuleTypeExamplesDefinition + RuleTypeExamples + RuleTypeExamplesTable + RuleTypeStep + RuleTypeStepArg + 
RuleTypeDataTable + RuleTypeDocString + RuleTypeTags + RuleTypeDescriptionHelper + RuleTypeDescription +) + +func (t RuleType) IsEOF() bool { + return t == RuleTypeEOF +} +func (t RuleType) Name() string { + switch t { + case RuleTypeEOF: + return "#EOF" + case RuleTypeEmpty: + return "#Empty" + case RuleTypeComment: + return "#Comment" + case RuleTypeTagLine: + return "#TagLine" + case RuleTypeFeatureLine: + return "#FeatureLine" + case RuleTypeRuleLine: + return "#RuleLine" + case RuleTypeBackgroundLine: + return "#BackgroundLine" + case RuleTypeScenarioLine: + return "#ScenarioLine" + case RuleTypeExamplesLine: + return "#ExamplesLine" + case RuleTypeStepLine: + return "#StepLine" + case RuleTypeDocStringSeparator: + return "#DocStringSeparator" + case RuleTypeTableRow: + return "#TableRow" + case RuleTypeLanguage: + return "#Language" + case RuleTypeOther: + return "#Other" + case RuleTypeGherkinDocument: + return "GherkinDocument" + case RuleTypeFeature: + return "Feature" + case RuleTypeFeatureHeader: + return "FeatureHeader" + case RuleTypeRule: + return "Rule" + case RuleTypeRuleHeader: + return "RuleHeader" + case RuleTypeBackground: + return "Background" + case RuleTypeScenarioDefinition: + return "ScenarioDefinition" + case RuleTypeScenario: + return "Scenario" + case RuleTypeExamplesDefinition: + return "ExamplesDefinition" + case RuleTypeExamples: + return "Examples" + case RuleTypeExamplesTable: + return "ExamplesTable" + case RuleTypeStep: + return "Step" + case RuleTypeStepArg: + return "StepArg" + case RuleTypeDataTable: + return "DataTable" + case RuleTypeDocString: + return "DocString" + case RuleTypeTags: + return "Tags" + case RuleTypeDescriptionHelper: + return "DescriptionHelper" + case RuleTypeDescription: + return "Description" + } + return "" +} + +type Location struct { + Line int + Column int +} + +type parseError struct { + msg string + loc *Location +} + +func (a *parseError) Error() string { + return fmt.Sprintf("(%d:%d): %s", a.loc.Line, a.loc.Column, a.msg) +} + +type parseErrors []error + +func (pe parseErrors) Error() string { + var ret = []string{"Parser errors:"} + for i := range pe { + ret = append(ret, pe[i].Error()) + } + return strings.Join(ret, "\n") +} + +func (p *parser) Parse(s Scanner, m Matcher) (err error) { + p.builder.Reset() + m.Reset() + ctxt := &parseContext{p, s, p.builder, m, nil, nil} + var state int + ctxt.startRule(RuleTypeGherkinDocument) + for { + gl, eof, err := ctxt.scan() + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + state, err = ctxt.match(state, gl) + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + if eof { + // done! 
\o/ + break + } + } + ctxt.endRule(RuleTypeGherkinDocument) + if len(ctxt.errors) > 0 { + return ctxt.errors + } + return +} + +type parseContext struct { + p *parser + s Scanner + b Builder + m Matcher + queue []*scanResult + errors parseErrors +} + +func (ctxt *parseContext) addError(e error) { + ctxt.errors = append(ctxt.errors, e) + // if (p.errors.length > 10) + // throw Errors.CompositeParserException.create(p.errors); +} + +type scanResult struct { + line *Line + atEof bool + err error +} + +func (ctxt *parseContext) scan() (*Line, bool, error) { + l := len(ctxt.queue) + if l > 0 { + x := ctxt.queue[0] + ctxt.queue = ctxt.queue[1:] + return x.line, x.atEof, x.err + } + return ctxt.s.Scan() +} + +func (ctxt *parseContext) startRule(r RuleType) (bool, error) { + ok, err := ctxt.b.StartRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) endRule(r RuleType) (bool, error) { + ok, err := ctxt.b.EndRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) build(t *Token) (bool, error) { + ok, err := ctxt.b.Build(t) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) match(state int, line *Line) (newState int, err error) { + switch state { + case 0: + return ctxt.matchAt0(line) + case 1: + return ctxt.matchAt1(line) + case 2: + return ctxt.matchAt2(line) + case 3: + return ctxt.matchAt3(line) + case 4: + return ctxt.matchAt4(line) + case 5: + return ctxt.matchAt5(line) + case 6: + return ctxt.matchAt6(line) + case 7: + return ctxt.matchAt7(line) + case 8: + return ctxt.matchAt8(line) + case 9: + return ctxt.matchAt9(line) + case 10: + return ctxt.matchAt10(line) + case 11: + return ctxt.matchAt11(line) + case 12: + return ctxt.matchAt12(line) + case 13: + return ctxt.matchAt13(line) + case 14: + return ctxt.matchAt14(line) + case 15: + return ctxt.matchAt15(line) + case 16: + return ctxt.matchAt16(line) + case 17: + return ctxt.matchAt17(line) + case 18: + return ctxt.matchAt18(line) + case 19: + return ctxt.matchAt19(line) + case 20: + return ctxt.matchAt20(line) + case 21: + return ctxt.matchAt21(line) + case 22: + return ctxt.matchAt22(line) + case 23: + return ctxt.matchAt23(line) + case 24: + return ctxt.matchAt24(line) + case 25: + return ctxt.matchAt25(line) + case 26: + return ctxt.matchAt26(line) + case 27: + return ctxt.matchAt27(line) + case 28: + return ctxt.matchAt28(line) + case 29: + return ctxt.matchAt29(line) + case 30: + return ctxt.matchAt30(line) + case 31: + return ctxt.matchAt31(line) + case 32: + return ctxt.matchAt32(line) + case 33: + return ctxt.matchAt33(line) + case 34: + return ctxt.matchAt34(line) + case 35: + return ctxt.matchAt35(line) + case 36: + return ctxt.matchAt36(line) + case 37: + return ctxt.matchAt37(line) + case 38: + return ctxt.matchAt38(line) + case 39: + return ctxt.matchAt39(line) + case 40: + return ctxt.matchAt40(line) + case 41: + return ctxt.matchAt41(line) + case 43: + return ctxt.matchAt43(line) + case 44: + return ctxt.matchAt44(line) + case 45: + return ctxt.matchAt45(line) + case 46: + return ctxt.matchAt46(line) + case 47: + return ctxt.matchAt47(line) + case 48: + return ctxt.matchAt48(line) + case 49: + return ctxt.matchAt49(line) + case 50: + return ctxt.matchAt50(line) + default: + return state, fmt.Errorf("Unknown state: %+v", state) + } +} + +// Start +func (ctxt *parseContext) matchAt0(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.build(token) + 
return 42, err + } + if ok, token, err := ctxt.matchLanguage(line); ok { + ctxt.startRule(RuleTypeFeature) + ctxt.startRule(RuleTypeFeatureHeader) + ctxt.build(token) + return 1, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.startRule(RuleTypeFeature) + ctxt.startRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchFeatureLine(line); ok { + ctxt.startRule(RuleTypeFeature) + ctxt.startRule(RuleTypeFeatureHeader) + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 0, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 0, err + } + + // var stateComment = "State: 0 - Start" + var expectedTokens = []string{"#EOF", "#Language", "#TagLine", "#FeatureLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 0, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:0>#Language:0 +func (ctxt *parseContext) matchAt1(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchFeatureLine(line); ok { + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 1, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 1, err + } + + // var stateComment = "State: 1 - GherkinDocument:0>Feature:0>FeatureHeader:0>#Language:0" + var expectedTokens = []string{"#TagLine", "#FeatureLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 1, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:1>Tags:0>#TagLine:0 +func (ctxt *parseContext) matchAt2(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchFeatureLine(line); ok { + ctxt.endRule(RuleTypeTags) + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 2, err + } + + // var stateComment = "State: 2 - GherkinDocument:0>Feature:0>FeatureHeader:1>Tags:0>#TagLine:0" + var expectedTokens = []string{"#TagLine", "#FeatureLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: 
fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 2, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:2>#FeatureLine:0 +func (ctxt *parseContext) matchAt3(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 5, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 6, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 4, err + } + + // var stateComment = "State: 3 - GherkinDocument:0>Feature:0>FeatureHeader:2>#FeatureLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 3, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt4(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 5, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 6, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + 
ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 4, err + } + + // var stateComment = "State: 4 - GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 4, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:2>#Comment:0 +func (ctxt *parseContext) matchAt5(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 5, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 6, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 5, err + } + + // var stateComment = "State: 5 - GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: 
fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 5, err +} + +// GherkinDocument:0>Feature:1>Background:0>#BackgroundLine:0 +func (ctxt *parseContext) matchAt6(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 6, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 8, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 9, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 7, err + } + + // var stateComment = "State: 6 - GherkinDocument:0>Feature:1>Background:0>#BackgroundLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 6, err +} + +// GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt7(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 8, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 9, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + 
ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 7, err + } + + // var stateComment = "State: 7 - GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 7, err +} + +// GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:2>#Comment:0 +func (ctxt *parseContext) matchAt8(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 8, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 9, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 8, err + } + + // var stateComment = "State: 8 - GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", 
strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 8, err +} + +// GherkinDocument:0>Feature:1>Background:2>Step:0>#StepLine:0 +func (ctxt *parseContext) matchAt9(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeDataTable) + ctxt.build(token) + return 10, err + } + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.startRule(RuleTypeDocString) + ctxt.build(token) + return 49, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 9, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 9, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 9, err + } + + // var stateComment = "State: 9 - GherkinDocument:0>Feature:1>Background:2>Step:0>#StepLine:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 9, err +} + +// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0 +func (ctxt *parseContext) matchAt10(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.build(token) + return 10, err + } + if 
ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 9, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 10, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 10, err + } + + // var stateComment = "State: 10 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 10, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:0>Tags:0>#TagLine:0 +func (ctxt *parseContext) matchAt11(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.build(token) + return 11, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeTags) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 11, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 11, err + } + + // var stateComment = "State: 11 - GherkinDocument:0>Feature:2>ScenarioDefinition:0>Tags:0>#TagLine:0" + var expectedTokens = []string{"#TagLine", "#ScenarioLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 11, err +} + +// 
GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0 +func (ctxt *parseContext) matchAt12(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 14, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 15, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 13, err + } + + // var stateComment = "State: 12 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 12, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt13(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 14, err + } + if ok, token, err := 
ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 15, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 13, err + } + + // var stateComment = "State: 13 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 13, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0 +func (ctxt *parseContext) matchAt14(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 14, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 15, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeScenario) + 
ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 14, err + } + + // var stateComment = "State: 14 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 14, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0 +func (ctxt *parseContext) matchAt15(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeDataTable) + ctxt.build(token) + return 16, err + } + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.startRule(RuleTypeDocString) + ctxt.build(token) + return 47, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 15, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + 
ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 15, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 15, err + } + + // var stateComment = "State: 15 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 15, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0 +func (ctxt *parseContext) matchAt16(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.build(token) + return 16, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 15, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + 
ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 16, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 16, err + } + + // var stateComment = "State: 16 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 16, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0 +func (ctxt *parseContext) matchAt17(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.build(token) + return 17, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeTags) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 17, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 17, err + } + + // var stateComment = "State: 17 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0" + var expectedTokens = []string{"#TagLine", "#ExamplesLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 17, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0 +func (ctxt *parseContext) matchAt18(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + 
return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 20, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeExamplesTable) + ctxt.build(token) + return 21, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 19, err + } + + // var stateComment = "State: 18 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 18, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt19(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + 
ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 20, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeExamplesTable) + ctxt.build(token) + return 21, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 19, err + } + + // var stateComment = "State: 19 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 19, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0 +func (ctxt 
*parseContext) matchAt20(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 20, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeExamplesTable) + ctxt.build(token) + return 21, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 20, err + } + + // var stateComment = "State: 20 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 20, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0 +func (ctxt *parseContext) matchAt21(line *Line) (newState int, err error) { + if ok, token, err := 
ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.build(token) + return 21, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 21, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 21, err + } + + // var stateComment = "State: 21 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 21, err +} + +// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:0>Tags:0>#TagLine:0 +func (ctxt 
*parseContext) matchAt22(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeTags) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 22, err + } + + // var stateComment = "State: 22 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:0>Tags:0>#TagLine:0" + var expectedTokens = []string{"#TagLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 22, err +} + +// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:1>#RuleLine:0 +func (ctxt *parseContext) matchAt23(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 25, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 26, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 24, err + } + + // var stateComment = "State: 23 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:1>#RuleLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if 
(ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 23, err +} + +// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt24(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 25, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 26, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 24, err + } + + // var stateComment = "State: 24 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 24, err +} + +// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:2>#Comment:0 +func (ctxt *parseContext) matchAt25(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 25, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 26, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + 
ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeRuleHeader) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 25, err + } + + // var stateComment = "State: 25 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 25, err +} + +// GherkinDocument:0>Feature:3>Rule:1>Background:0>#BackgroundLine:0 +func (ctxt *parseContext) matchAt26(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 26, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 28, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 29, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 27, err + } + + // var stateComment = "State: 26 - GherkinDocument:0>Feature:3>Rule:1>Background:0>#BackgroundLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + 
msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 26, err +} + +// GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt27(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 28, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 29, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 27, err + } + + // var stateComment = "State: 27 - GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 27, err +} + +// GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:2>#Comment:0 +func (ctxt *parseContext) matchAt28(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 28, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + 
ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 29, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 28, err + } + + // var stateComment = "State: 28 - GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 28, err +} + +// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:0>#StepLine:0 +func (ctxt *parseContext) matchAt29(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeDataTable) + ctxt.build(token) + return 30, err + } + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.startRule(RuleTypeDocString) + ctxt.build(token) + return 45, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 29, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + 
ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 29, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 29, err + } + + // var stateComment = "State: 29 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:0>#StepLine:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 29, err +} + +// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0 +func (ctxt *parseContext) matchAt30(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.build(token) + return 30, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 29, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 30, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 30, err + } + + // var stateComment = "State: 30 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, 
", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 30, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:0>Tags:0>#TagLine:0 +func (ctxt *parseContext) matchAt31(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.build(token) + return 31, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeTags) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 31, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 31, err + } + + // var stateComment = "State: 31 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:0>Tags:0>#TagLine:0" + var expectedTokens = []string{"#TagLine", "#ScenarioLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 31, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0 +func (ctxt *parseContext) matchAt32(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 34, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 35, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, 
err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 33, err + } + + // var stateComment = "State: 32 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 32, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt33(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 34, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 35, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + 
ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 33, err + } + + // var stateComment = "State: 33 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 33, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0 +func (ctxt *parseContext) matchAt34(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 34, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 35, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 34, err + } + + // var stateComment = "State: 34 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + 
loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 34, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0 +func (ctxt *parseContext) matchAt35(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeDataTable) + ctxt.build(token) + return 36, err + } + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.startRule(RuleTypeDocString) + ctxt.build(token) + return 43, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 35, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 35, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 35, err + } + + // var stateComment = "State: 35 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = 
&parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 35, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0 +func (ctxt *parseContext) matchAt36(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.build(token) + return 36, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 35, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDataTable) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 36, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 36, err + } + + // var stateComment = "State: 36 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: 
&Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 36, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0 +func (ctxt *parseContext) matchAt37(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.build(token) + return 37, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeTags) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 37, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 37, err + } + + // var stateComment = "State: 37 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0" + var expectedTokens = []string{"#TagLine", "#ExamplesLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 37, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0 +func (ctxt *parseContext) matchAt38(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 40, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeExamplesTable) + ctxt.build(token) + return 41, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := 
ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 39, err + } + + // var stateComment = "State: 38 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 38, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt39(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 40, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.startRule(RuleTypeExamplesTable) + ctxt.build(token) + return 41, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + 
ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 39, err + } + + // var stateComment = "State: 39 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0" + var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 39, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0 +func (ctxt *parseContext) matchAt40(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 40, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.startRule(RuleTypeExamplesTable) + ctxt.build(token) + return 41, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) 
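+ // lookahead0 found a #ScenarioLine ahead, so these tags open a new scenario definition rather than a Rule.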
+ ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 40, err + } + + // var stateComment = "State: 40 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0" + var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 40, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0 +func (ctxt *parseContext) matchAt41(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchTableRow(line); ok { + ctxt.build(token) + return 41, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + 
ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeExamplesTable) + ctxt.endRule(RuleTypeExamples) + ctxt.endRule(RuleTypeExamplesDefinition) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 41, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 41, err + } + + // var stateComment = "State: 41 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0" + var expectedTokens = []string{"#EOF", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 41, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt43(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.build(token) + return 44, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 43, err + } + + // var stateComment = "State: 43 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0" + var expectedTokens = []string{"#DocStringSeparator", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw 
error; + //ctxt.addError(err) + return 43, err +} + +// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt44(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 35, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 37, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 38, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 44, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 44, err + } + + // var stateComment = "State: 44 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0" + var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) 
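+ // The state is returned unchanged together with err; the Parse loop records the error and continues unless stopAtFirstError is set.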
+ return 44, err +} + +// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt45(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.build(token) + return 46, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 45, err + } + + // var stateComment = "State: 45 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0" + var expectedTokens = []string{"#DocStringSeparator", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 45, err +} + +// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt46(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 29, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 31, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 32, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeRule) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 46, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 46, err + } + + // var stateComment = "State: 46 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0" + var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, 
Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 46, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt47(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.build(token) + return 48, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 47, err + } + + // var stateComment = "State: 47 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0" + var expectedTokens = []string{"#DocStringSeparator", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 47, err +} + +// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt48(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 15, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead1(line) { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 17, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchExamplesLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeExamplesDefinition) + ctxt.startRule(RuleTypeExamples) + ctxt.build(token) + return 18, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + 
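// build hands the #ScenarioLine token to the AST builder inside the Scenario rule just opened. +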
ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeScenario) + ctxt.endRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 48, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 48, err + } + + // var stateComment = "State: 48 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0" + var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 48, err +} + +// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt49(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchDocStringSeparator(line); ok { + ctxt.build(token) + return 50, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.build(token) + return 49, err + } + + // var stateComment = "State: 49 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0" + var expectedTokens = []string{"#DocStringSeparator", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 49, err +} + +// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0 +func (ctxt *parseContext) matchAt50(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchStepLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.startRule(RuleTypeStep) + ctxt.build(token) + return 9, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + 
return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeDocString) + ctxt.endRule(RuleTypeStep) + ctxt.endRule(RuleTypeBackground) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 50, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 50, err + } + + // var stateComment = "State: 50 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0" + var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 50, err +} + +type Matcher interface { + MatchEOF(line *Line) (bool, *Token, error) + MatchEmpty(line *Line) (bool, *Token, error) + MatchComment(line *Line) (bool, *Token, error) + MatchTagLine(line *Line) (bool, *Token, error) + MatchFeatureLine(line *Line) (bool, *Token, error) + MatchRuleLine(line *Line) (bool, *Token, error) + MatchBackgroundLine(line *Line) (bool, *Token, error) + MatchScenarioLine(line *Line) (bool, *Token, error) + MatchExamplesLine(line *Line) (bool, *Token, error) + MatchStepLine(line *Line) (bool, *Token, error) + MatchDocStringSeparator(line *Line) (bool, *Token, error) + MatchTableRow(line *Line) (bool, *Token, error) + MatchLanguage(line *Line) (bool, *Token, error) + MatchOther(line *Line) (bool, *Token, error) + Reset() +} + +func (ctxt *parseContext) isMatchEOF(line *Line) bool { + ok, _, _ := ctxt.matchEOF(line) + return ok +} +func (ctxt *parseContext) matchEOF(line *Line) (bool, *Token, error) { + return ctxt.m.MatchEOF(line) +} + +func (ctxt *parseContext) isMatchEmpty(line *Line) bool { + ok, _, _ := ctxt.matchEmpty(line) + return ok +} +func (ctxt *parseContext) matchEmpty(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchEmpty(line) +} + +func (ctxt *parseContext) isMatchComment(line *Line) bool { + ok, _, _ := ctxt.matchComment(line) + return ok +} +func (ctxt *parseContext) matchComment(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchComment(line) +} + +func (ctxt *parseContext) isMatchTagLine(line *Line) bool { + ok, _, _ := ctxt.matchTagLine(line) + return ok +} +func (ctxt *parseContext) matchTagLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchTagLine(line) +} + +func (ctxt *parseContext) isMatchFeatureLine(line *Line) bool { + ok, _, _ := ctxt.matchFeatureLine(line) + return ok +} +func (ctxt *parseContext) matchFeatureLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return 
ctxt.m.MatchFeatureLine(line) +} + +func (ctxt *parseContext) isMatchRuleLine(line *Line) bool { + ok, _, _ := ctxt.matchRuleLine(line) + return ok +} +func (ctxt *parseContext) matchRuleLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchRuleLine(line) +} + +func (ctxt *parseContext) isMatchBackgroundLine(line *Line) bool { + ok, _, _ := ctxt.matchBackgroundLine(line) + return ok +} +func (ctxt *parseContext) matchBackgroundLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchBackgroundLine(line) +} + +func (ctxt *parseContext) isMatchScenarioLine(line *Line) bool { + ok, _, _ := ctxt.matchScenarioLine(line) + return ok +} +func (ctxt *parseContext) matchScenarioLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchScenarioLine(line) +} + +func (ctxt *parseContext) isMatchExamplesLine(line *Line) bool { + ok, _, _ := ctxt.matchExamplesLine(line) + return ok +} +func (ctxt *parseContext) matchExamplesLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchExamplesLine(line) +} + +func (ctxt *parseContext) isMatchStepLine(line *Line) bool { + ok, _, _ := ctxt.matchStepLine(line) + return ok +} +func (ctxt *parseContext) matchStepLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchStepLine(line) +} + +func (ctxt *parseContext) isMatchDocStringSeparator(line *Line) bool { + ok, _, _ := ctxt.matchDocStringSeparator(line) + return ok +} +func (ctxt *parseContext) matchDocStringSeparator(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchDocStringSeparator(line) +} + +func (ctxt *parseContext) isMatchTableRow(line *Line) bool { + ok, _, _ := ctxt.matchTableRow(line) + return ok +} +func (ctxt *parseContext) matchTableRow(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchTableRow(line) +} + +func (ctxt *parseContext) isMatchLanguage(line *Line) bool { + ok, _, _ := ctxt.matchLanguage(line) + return ok +} +func (ctxt *parseContext) matchLanguage(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchLanguage(line) +} + +func (ctxt *parseContext) isMatchOther(line *Line) bool { + ok, _, _ := ctxt.matchOther(line) + return ok +} +func (ctxt *parseContext) matchOther(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchOther(line) +} + +func (ctxt *parseContext) lookahead0(initialLine *Line) bool { + var queue []*scanResult + var match bool + + for { + line, atEof, err := ctxt.scan() + queue = append(queue, &scanResult{line, atEof, err}) + + if false || ctxt.isMatchScenarioLine(line) { + match = true + break + } + if !(false || ctxt.isMatchEmpty(line) || ctxt.isMatchComment(line) || ctxt.isMatchTagLine(line)) { + break + } + if atEof { + break + } + } + + ctxt.queue = append(ctxt.queue, queue...) 
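+ // Lines consumed while looking ahead are re-queued on ctxt.queue so the main scan loop replays them after this decision.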
+ + return match +} + +func (ctxt *parseContext) lookahead1(initialLine *Line) bool { + var queue []*scanResult + var match bool + + for { + line, atEof, err := ctxt.scan() + queue = append(queue, &scanResult{line, atEof, err}) + + if false || ctxt.isMatchExamplesLine(line) { + match = true + break + } + if !(false || ctxt.isMatchEmpty(line) || ctxt.isMatchComment(line) || ctxt.isMatchTagLine(line)) { + break + } + if atEof { + break + } + } + + ctxt.queue = append(ctxt.queue, queue...) + + return match +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor b/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor new file mode 100644 index 000000000..7b173db1f --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor @@ -0,0 +1,309 @@ +@using Berp; +@helper CallProduction(ProductionRule production) +{ + switch(production.Type) + { + case ProductionRuleType.Start: + @:ctxt.startRule(@Raw("RuleType" + production.RuleName.Replace("#", ""))); + break; + case ProductionRuleType.End: + @:ctxt.endRule(@Raw("RuleType" + production.RuleName.Replace("#", ""))); + break; + case ProductionRuleType.Process: + @:ctxt.build(token); + break; + } +} +@helper HandleParserError(IEnumerable expectedTokens, State state) +{ + // var stateComment = "State: @state.Id - @Raw(state.Comment)" + var expectedTokens = []string{"@Raw(string.Join("\", \"", expectedTokens))"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens,", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens,", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return @state.Id, err} +@helper MatchToken(TokenType tokenType) +{ctxt.match@(tokenType)(line)} +@helper IsMatchToken(TokenType tokenType) +{ctxt.isMatch@(tokenType)(line)} +@helper TokenConst(Rule rule) +{@Raw("rule" + rule.Name.Replace("#", "Int"))} +// +// This file is generated. Do not edit! Edit parser.go.razor instead. 
+ +package gherkin + +import ( + "fmt" + "strings" +) + +type TokenType int + +const ( + TokenTypeNone TokenType = iota + @foreach(var rule in Model.RuleSet.TokenRules) + { @Raw("TokenType" + rule.Name.Replace("#", "")) +} +) + +func tokenTypeForRule(rt RuleType) TokenType { + return TokenTypeNone +} + +func (t TokenType) Name() string { + switch t { + @foreach(var rule in Model.RuleSet.TokenRules) + { case @Raw("TokenType" + rule.Name.Replace("#", "")): return "@Raw(rule.Name.Replace("#", ""))" +} + } + return "" +} + +func (t TokenType) RuleType() RuleType { + switch t { + @foreach(var rule in Model.RuleSet.TokenRules) + { case @Raw("TokenType" + rule.Name.Replace("#", "")): return @Raw("RuleType" + rule.Name.Replace("#", "")) +} + } + return RuleTypeNone +} + + +type RuleType int + +const ( + RuleTypeNone RuleType = iota + + @foreach(var rule in Model.RuleSet.Where(r => !r.TempRule)) + { @Raw("RuleType" + rule.Name.Replace("#", "")) +} +) + +func (t RuleType) IsEOF() bool { + return t == RuleTypeEOF +} +func (t RuleType) Name() string { + switch t { + @foreach(var rule in Model.RuleSet.Where(r => !r.TempRule)) + { case @Raw("RuleType" + rule.Name.Replace("#", "")): return "@Raw(rule.Name)" +} + } + return "" +} + +type Location struct { + Line int + Column int +} + +type parseError struct { + msg string + loc *Location +} + +func (a *parseError) Error() string { + return fmt.Sprintf("(%d:%d): %s", a.loc.Line, a.loc.Column, a.msg) +} + +type parseErrors []error +func (pe parseErrors) Error() string { + var ret = []string{"Parser errors:"} + for i := range pe { + ret = append(ret, pe[i].Error()) + } + return strings.Join(ret,"\n") +} + +func (p *parser) Parse(s Scanner, m Matcher) (err error) { + p.builder.Reset() + m.Reset() + ctxt := &parseContext{p,s,p.builder,m,nil,nil} + var state int + ctxt.startRule(@Raw("RuleType" + @Model.RuleSet.StartRule.Name)) + for { + gl, eof, err := ctxt.scan() + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + state, err = ctxt.match(state, gl) + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + if eof { + // done! 
\o/ + break + } + } + ctxt.endRule(@Raw("RuleType" + @Model.RuleSet.StartRule.Name)) + if len(ctxt.errors) > 0 { + return ctxt.errors + } + return +} + +type parseContext struct { + p *parser + s Scanner + b Builder + m Matcher + queue []*scanResult + errors parseErrors +} + +func (ctxt *parseContext) addError(e error) { + ctxt.errors = append(ctxt.errors, e); + // if (p.errors.length > 10) + // throw Errors.CompositeParserException.create(p.errors); +} + +type scanResult struct { + line *Line + atEof bool + err error +} +func (ctxt *parseContext) scan() (*Line, bool, error) { + l := len(ctxt.queue) + if l > 0 { + x := ctxt.queue[0] + ctxt.queue = ctxt.queue[1:] + return x.line, x.atEof, x.err + } + return ctxt.s.Scan() +} + +func (ctxt *parseContext) startRule(r RuleType) (bool, error) { + ok, err := ctxt.b.StartRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) endRule(r RuleType) (bool, error) { + ok, err := ctxt.b.EndRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) build(t *Token) (bool, error) { + ok, err := ctxt.b.Build(t) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + + +func (ctxt *parseContext) match(state int, line *Line) (newState int, err error) { + switch(state) { + @foreach(var state in Model.States.Values.Where(s => !s.IsEndState)) + { + @:case @state.Id: + @:return ctxt.matchAt@(state.Id)(line); + } + default: + return state, fmt.Errorf("Unknown state: %+v", state); + } +} + +@foreach(var state in Model.States.Values.Where(s => !s.IsEndState)) +{ + + // @Raw(state.Comment) +func (ctxt *parseContext) matchAt@(state.Id)(line *Line) (newState int, err error) { + @foreach(var transition in state.Transitions) + { + @:if ok, token, err := @MatchToken(transition.TokenType); ok { + if (transition.LookAheadHint != null) + { + @:if ctxt.lookahead@(transition.LookAheadHint.Id)(line) { + } + foreach(var production in transition.Productions) + { + @CallProduction(production) + } + @:return @transition.TargetState, err; + if (transition.LookAheadHint != null) + { + @:} + } + @:} + } + @HandleParserError(state.Transitions.Select(t => "#" + t.TokenType.ToString()).Distinct(), state) +} + +} + +type Matcher interface { + @foreach(var rule in Model.RuleSet.TokenRules) + { Match@(rule.Name.Replace("#", ""))(line *Line) (bool,*Token,error) +} + Reset() +} +@foreach(var rule in Model.RuleSet.TokenRules) +{ + +func (ctxt *parseContext) isMatch@(rule.Name.Replace("#", ""))(line *Line) bool { + ok, _, _ := ctxt.match@(rule.Name.Replace("#", ""))(line) + return ok +} +func (ctxt *parseContext) match@(rule.Name.Replace("#", ""))(line *Line) (bool, *Token, error) { + @if (rule.Name != "#EOF") + { + @:if line.IsEof() { + @: return false, nil, nil + @:} + } + return ctxt.m.Match@(rule.Name.Replace("#", ""))(line); +} + +} + +@foreach(var lookAheadHint in Model.RuleSet.LookAheadHints) +{ + +func (ctxt *parseContext) lookahead@(lookAheadHint.Id)(initialLine *Line) bool { + var queue []*scanResult + var match bool + + for { + line, atEof, err := ctxt.scan(); + queue = append(queue, &scanResult{line,atEof,err}); + + if false @foreach(var tokenType in lookAheadHint.ExpectedTokens) { || @IsMatchToken(tokenType)} { + match = true; + break + } + if !(false @foreach(var tokenType in lookAheadHint.Skip) { || @IsMatchToken(tokenType)}) { + break + } + if atEof { + break + } + } + + ctxt.queue = append(ctxt.queue, queue...) 
+ + return match; + } + +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/pickles.go b/vendor/github.com/cucumber/gherkin/go/v26/pickles.go new file mode 100644 index 000000000..ad3fa84d2 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/pickles.go @@ -0,0 +1,266 @@ +package gherkin + +import ( + "github.com/cucumber/messages/go/v21" + "strings" +) + +func Pickles(gherkinDocument messages.GherkinDocument, uri string, newId func() string) []*messages.Pickle { + pickles := make([]*messages.Pickle, 0) + if gherkinDocument.Feature == nil { + return pickles + } + language := gherkinDocument.Feature.Language + + pickles = compileFeature(pickles, *gherkinDocument.Feature, uri, language, newId) + return pickles +} + +func compileFeature(pickles []*messages.Pickle, feature messages.Feature, uri string, language string, newId func() string) []*messages.Pickle { + featureBackgroundSteps := make([]*messages.Step, 0) + featureTags := feature.Tags + for _, child := range feature.Children { + if child.Background != nil { + featureBackgroundSteps = append(featureBackgroundSteps, child.Background.Steps...) + } + if child.Rule != nil { + pickles = compileRule(pickles, child.Rule, featureTags, featureBackgroundSteps, uri, language, newId) + } + if child.Scenario != nil { + if len(child.Scenario.Examples) == 0 { + pickles = compileScenario(pickles, featureBackgroundSteps, child.Scenario, featureTags, uri, language, newId) + } else { + pickles = compileScenarioOutline(pickles, child.Scenario, featureTags, featureBackgroundSteps, uri, language, newId) + } + } + } + return pickles +} + +func compileRule( + pickles []*messages.Pickle, + rule *messages.Rule, + featureTags []*messages.Tag, + featureBackgroundSteps []*messages.Step, + uri string, + language string, + newId func() string, +) []*messages.Pickle { + ruleBackgroundSteps := make([]*messages.Step, 0) + ruleBackgroundSteps = append(ruleBackgroundSteps, featureBackgroundSteps...) + tags := append(featureTags, rule.Tags...) + + for _, child := range rule.Children { + if child.Background != nil { + ruleBackgroundSteps = append(ruleBackgroundSteps, child.Background.Steps...) 
+ } + if child.Scenario != nil { + if len(child.Scenario.Examples) == 0 { + pickles = compileScenario(pickles, ruleBackgroundSteps, child.Scenario, tags, uri, language, newId) + } else { + pickles = compileScenarioOutline(pickles, child.Scenario, tags, ruleBackgroundSteps, uri, language, newId) + } + } + } + return pickles + +} + +func compileScenarioOutline( + pickles []*messages.Pickle, + scenario *messages.Scenario, + inheritedTags []*messages.Tag, + backgroundSteps []*messages.Step, + uri string, + language string, + newId func() string, +) []*messages.Pickle { + for _, examples := range scenario.Examples { + if examples.TableHeader == nil { + continue + } + variableCells := examples.TableHeader.Cells + for _, valuesRow := range examples.TableBody { + valueCells := valuesRow.Cells + + computedPickleSteps := make([]*messages.PickleStep, 0) + pickleBackgroundSteps := make([]*messages.PickleStep, 0) + + if len(scenario.Steps) > 0 { + pickleBackgroundSteps = pickleSteps(backgroundSteps, newId) + } + + // translate computedPickleSteps based on valuesRow + previous := messages.PickleStepType_UNKNOWN + for _, step := range scenario.Steps { + text := step.Text + for i, variableCell := range variableCells { + text = strings.Replace(text, "<"+variableCell.Value+">", valueCells[i].Value, -1) + } + + pickleStep := pickleStep(step, variableCells, valuesRow, newId, previous) + previous = pickleStep.Type + computedPickleSteps = append(computedPickleSteps, pickleStep) + } + + // translate pickle name + name := scenario.Name + for i, key := range variableCells { + name = strings.Replace(name, "<"+key.Value+">", valueCells[i].Value, -1) + } + + if len(computedPickleSteps) > 0 { + computedPickleSteps = append(pickleBackgroundSteps, computedPickleSteps...) + } + + id := newId() + tags := pickleTags(append(inheritedTags, append(scenario.Tags, examples.Tags...)...)) + + pickles = append(pickles, &messages.Pickle{ + Id: id, + Uri: uri, + Steps: computedPickleSteps, + Tags: tags, + Name: name, + Language: language, + AstNodeIds: []string{scenario.Id, valuesRow.Id}, + }) + } + } + return pickles +} + +func compileScenario( + pickles []*messages.Pickle, + backgroundSteps []*messages.Step, + scenario *messages.Scenario, + inheritedTags []*messages.Tag, + uri string, + language string, + newId func() string, +) []*messages.Pickle { + steps := make([]*messages.PickleStep, 0) + if len(scenario.Steps) > 0 { + pickleBackgroundSteps := pickleSteps(backgroundSteps, newId) + steps = append(pickleBackgroundSteps, pickleSteps(scenario.Steps, newId)...) 
+ } + tags := pickleTags(append(inheritedTags, scenario.Tags...)) + id := newId() + pickles = append(pickles, &messages.Pickle{ + Id: id, + Uri: uri, + Steps: steps, + Tags: tags, + Name: scenario.Name, + Language: language, + AstNodeIds: []string{scenario.Id}, + }) + return pickles +} + +func pickleDataTable(table *messages.DataTable, variableCells []*messages.TableCell, valueCells []*messages.TableCell) *messages.PickleTable { + pickleTableRows := make([]*messages.PickleTableRow, len(table.Rows)) + for i, row := range table.Rows { + pickleTableCells := make([]*messages.PickleTableCell, len(row.Cells)) + for j, cell := range row.Cells { + pickleTableCells[j] = &messages.PickleTableCell{ + Value: interpolate(cell.Value, variableCells, valueCells), + } + } + pickleTableRows[i] = &messages.PickleTableRow{Cells: pickleTableCells} + } + return &messages.PickleTable{Rows: pickleTableRows} +} + +func pickleDocString(docString *messages.DocString, variableCells []*messages.TableCell, valueCells []*messages.TableCell) *messages.PickleDocString { + return &messages.PickleDocString{ + MediaType: interpolate(docString.MediaType, variableCells, valueCells), + Content: interpolate(docString.Content, variableCells, valueCells), + } +} + +func pickleTags(tags []*messages.Tag) []*messages.PickleTag { + ptags := make([]*messages.PickleTag, len(tags)) + for i, tag := range tags { + ptags[i] = &messages.PickleTag{ + Name: tag.Name, + AstNodeId: tag.Id, + } + } + return ptags +} + +func pickleSteps(steps []*messages.Step, newId func() string) []*messages.PickleStep { + pickleSteps := make([]*messages.PickleStep, len(steps)) + previous := messages.PickleStepType_UNKNOWN + for i, step := range steps { + pickleStep := pickleStep(step, nil, nil, newId, previous) + previous = pickleStep.Type + pickleSteps[i] = pickleStep + } + return pickleSteps +} + +func pickleStep( + step *messages.Step, + variableCells []*messages.TableCell, + valuesRow *messages.TableRow, + newId func() string, + previous messages.PickleStepType, +) *messages.PickleStep { + + var valueCells []*messages.TableCell + if valuesRow != nil { + valueCells = valuesRow.Cells + } + + pickleStep := &messages.PickleStep{ + Id: newId(), + Text: interpolate(step.Text, variableCells, valueCells), + Type: mapType(step.KeywordType, previous), + AstNodeIds: []string{step.Id}, + } + if valuesRow != nil { + pickleStep.AstNodeIds = append(pickleStep.AstNodeIds, valuesRow.Id) + } + if step.DataTable != nil { + pickleStep.Argument = &messages.PickleStepArgument{ + DataTable: pickleDataTable(step.DataTable, variableCells, valueCells), + } + } + if step.DocString != nil { + pickleStep.Argument = &messages.PickleStepArgument{ + DocString: pickleDocString(step.DocString, variableCells, valueCells), + } + } + return pickleStep +} + +func mapType(keywordType messages.StepKeywordType, previous messages.PickleStepType) messages.PickleStepType { + switch keywordType { + case messages.StepKeywordType_UNKNOWN: + return messages.PickleStepType_UNKNOWN + case messages.StepKeywordType_CONTEXT: + return messages.PickleStepType_CONTEXT + case messages.StepKeywordType_ACTION: + return messages.PickleStepType_ACTION + case messages.StepKeywordType_OUTCOME: + return messages.PickleStepType_OUTCOME + case messages.StepKeywordType_CONJUNCTION: + return previous + default: + panic("Bad enum value for StepKeywordType") + } +} + +func interpolate(s string, variableCells []*messages.TableCell, valueCells []*messages.TableCell) string { + if variableCells == nil || valueCells == nil { + 
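+		// no example row values to interpolate from; return the text unchanged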
return s + } + + for i, variableCell := range variableCells { + s = strings.Replace(s, "<"+variableCell.Value+">", valueCells[i].Value, -1) + } + + return s +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/test.feature b/vendor/github.com/cucumber/gherkin/go/v26/test.feature new file mode 100644 index 000000000..dff77a22b --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/test.feature @@ -0,0 +1,151 @@ +Feature: + + Scenario: scenario 1 + Given text + + Scenario: scenario 2 + Given text + + Scenario: scenario 3 + Given text + + Scenario: scenario 4 + Given text + + Scenario: scenario 5 + Given text + + Scenario: scenario 6 + Given text + + Scenario: scenario 7 + Given text + + Scenario: scenario 8 + Given text + + Scenario: scenario 9 + Given text + + Scenario: scenario 10 + Given text + + Scenario: scenario 11 + Given text + + Scenario: scenario 12 + Given text + + Scenario: scenario 13 + Given text + + Scenario: scenario 14 + Given text + + Scenario: scenario 15 + Given text + + Scenario: scenario 16 + Given text + + Scenario: scenario 17 + Given text + + Scenario: scenario 18 + Given text + + Scenario: scenario 19 + Given text + + Scenario: scenario 20 + Given text + + Scenario: scenario 21 + Given text + + Scenario: scenario 22 + Given text + + Scenario: scenario 23 + Given text + + Scenario: scenario 24 + Given text + + Scenario: scenario 25 + Given text + + Scenario: scenario 26 + Given text + + Scenario: scenario 27 + Given text + + Scenario: scenario 28 + Given text + + Scenario: scenario 29 + Given text + + Scenario: scenario 30 + Given text + + Scenario: scenario 31 + Given text + + Scenario: scenario 32 + Given text + + Scenario: scenario 33 + Given text + + Scenario: scenario 34 + Given text + + Scenario: scenario 35 + Given text + + Scenario: scenario 36 + Given text + + Scenario: scenario 37 + Given text + + Scenario: scenario 38 + Given text + + Scenario: scenario 39 + Given text + + Scenario: scenario 40 + Given text + + Scenario: scenario 41 + Given text + + Scenario: scenario 42 + Given text + + Scenario: scenario 43 + Given text + + Scenario: scenario 44 + Given text + + Scenario: scenario 45 + Given text + + Scenario: scenario 46 + Given text + + Scenario: scenario 47 + Given text + + Scenario: scenario 48 + Given text + + Scenario: scenario 49 + Given text + + Scenario: scenario 50 + Given text diff --git a/vendor/github.com/cucumber/gherkin/go/v26/test.sh b/vendor/github.com/cucumber/gherkin/go/v26/test.sh new file mode 100644 index 000000000..97debf647 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/test.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env sh + +./bin/gherkin --no-ast --no-pickles test.feature | ./bin/gherkin --no-source --no-ast --json diff --git a/vendor/github.com/cucumber/godog/.gitignore b/vendor/github.com/cucumber/godog/.gitignore new file mode 100644 index 000000000..bd77fc9ff --- /dev/null +++ b/vendor/github.com/cucumber/godog/.gitignore @@ -0,0 +1,13 @@ +/cmd/godog/godog +/example/example +**/vendor/* +Gopkg.lock +Gopkg.toml + +.DS_Store +.idea +.vscode + +_artifacts + +vendor diff --git a/vendor/github.com/cucumber/godog/CHANGELOG.md b/vendor/github.com/cucumber/godog/CHANGELOG.md new file mode 100644 index 000000000..21a323e0a --- /dev/null +++ b/vendor/github.com/cucumber/godog/CHANGELOG.md @@ -0,0 +1,280 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org). 
+ +This document is formatted according to the principles of [Keep A CHANGELOG](http://keepachangelog.com). + +## Unreleased + +## [v0.15.1] + +### Added +- Step text is added to "step is undefined" error - ([669](https://github.com/cucumber/godog/pull/669) - [vearutop](https://github.com/vearutop)) +- Localisation support by @MegaGrindStone in https://github.com/cucumber/godog/pull/665 +- feat: support uint types by @chengxilo in https://github.com/cucumber/godog/pull/695 + +### Changed +- Replace deprecated `::set-output` - ([681](https://github.com/cucumber/godog/pull/681) - [nodeg](https://github.com/nodeg)) + +### Fixed +- fix(errors): Fix expected Step argument count for steps with `context.Context` ([679](https://github.com/cucumber/godog/pull/679) - [tigh-latte](https://github.com/tigh-latte)) +- fix(formatter): On concurrent execution, execute formatter at end of Scenario - ([645](https://github.com/cucumber/godog/pull/645) - [tigh-latte](https://github.com/tigh-latte)) +- Pretty printing results now prints the line where the step is declared instead of the line where the handler is declared. ([668](https://github.com/cucumber/godog/pull/668) - [SpencerC](https://github.com/SpencerC)) +- Update honnef.co/go/tools/cmd/staticcheck version in Makefile by @RezaZareiii in https://github.com/cucumber/godog/pull/670 +- fix: verify dogT exists in the context before using it by @cakoolen in https://github.com/cucumber/godog/pull/692 +- fix: change bang to being in README by @nahomEagleLion in https://github.com/cucumber/godog/pull/687 +- Mark junit test cases as skipped if no pickle step results available by @mrsheepuk in https://github.com/cucumber/godog/pull/597 + +## [v0.15.0] + +### Added +- Improved the type checking of step return types and improved the error messages - ([647](https://github.com/cucumber/godog/pull/647) - [johnlon](https://github.com/johnlon)) +- Ambiguous step definitions will now be detected when strict mode is activated - ([636](https://github.com/cucumber/godog/pull/636), [648](https://github.com/cucumber/godog/pull/648) - [johnlon](https://github.com/johnlon)) +- Provide support for attachments / embeddings including a new example in the examples dir - ([623](https://github.com/cucumber/godog/pull/623) - [johnlon](https://github.com/johnlon)) + +### Changed +- Formatters now have a `Close` method and associated `io.Writer` changed to `io.WriteCloser`.
+ +## [v0.14.1] + +### Added +- Provide testing.T-compatible interface on test context, allowing usage of assertion libraries such as testify's assert/require - ([571](https://github.com/cucumber/godog/pull/571) - [mrsheepuk](https://github.com/mrsheepuk)) +- Created releasing guidelines - ([608](https://github.com/cucumber/godog/pull/608) - [glibas](https://github.com/glibas)) + +### Fixed +- Step duration calculation - ([616](https://github.com/cucumber/godog/pull/616) - [iaroslav-ciupin](https://github.com/iaroslav-ciupin)) +- Invalid memory address or nil pointer dereference in RetrieveFeatures - ([566](https://github.com/cucumber/godog/pull/566) - [corneldamian](https://github.com/corneldamian)) + +## [v0.14.0] +### Added +- Improve ErrSkip handling, add test for Summary and operations order ([584](https://github.com/cucumber/godog/pull/584) - [vearutop](https://github.com/vearutop)) + +### Fixed +- Remove line overwriting for scenario outlines in cucumber formatter ([605](https://github.com/cucumber/godog/pull/605) - [glibas](https://github.com/glibas)) +- Remove duplicate warning message ([590](https://github.com/cucumber/godog/pull/590) - [vearutop](https://github.com/vearutop)) +- updated base formatter to set a scenario as passed unless there exist ([582](https://github.com/cucumber/godog/pull/582) - [roskee](https://github.com/roskee)) + +### Changed +- Update test.yml ([583](https://github.com/cucumber/godog/pull/583) - [vearutop](https://github.com/vearutop)) + +## [v0.13.0] +### Added +- Support for reading feature files from an `fs.FS` ([550](https://github.com/cucumber/godog/pull/550) - [tigh-latte](https://github.com/tigh-latte)) +- Added keyword functions. ([509](https://github.com/cucumber/godog/pull/509) - [otrava7](https://github.com/otrava7)) +- Prefer go test to use of godog cli in README ([548](https://github.com/cucumber/godog/pull/548) - [danielhelfand](https://github.com/danielhelfand)) +- Use `fs.FS` abstraction for filesystem ([550](https://github.com/cucumber/godog/pull/550) - [tigh-latte](https://github.com/tigh-latte)) +- Cancel context for each scenario ([514](https://github.com/cucumber/godog/pull/514) - [draganm](https://github.com/draganm)) + +### Fixed +- Improve hooks invocation flow ([568](https://github.com/cucumber/godog/pull/568) - [vearutop](https://github.com/vearutop)) +- Result of testing.T respect strict option ([539](https://github.com/cucumber/godog/pull/539) - [eiel](https://github.com/eiel)) + +### Changed +- BREAKING CHANGE, upgraded cucumber and messages dependencies = ([515](https://github.com/cucumber/godog/pull/515) - [otrava7](https://github.com/otrava7)) + +## [v0.12.6] +### Changed +- Each scenario is run with a cancellable `context.Context` which is cancelled at the end of the scenario. ([514](https://github.com/cucumber/godog/pull/514) - [draganm](https://github.com/draganm)) +- README example is updated with `context.Context` and `go test` usage. ([477](https://github.com/cucumber/godog/pull/477) - [vearutop](https://github.com/vearutop)) +- Removed deprecation of `godog.BindFlags`. ([498](https://github.com/cucumber/godog/pull/498) - [vearutop](https://github.com/vearutop)) +- Pretty Print when using rules. 
([480](https://github.com/cucumber/godog/pull/480) - [dumpsterfireproject](https://github.com/dumpsterfireproject)) + +### Fixed +- Fixed a bug which would ignore the context returned from a substep.([488](https://github.com/cucumber/godog/pull/488) - [wichert](https://github.com/wichert)) +- Fixed a bug which would cause a panic when using the pretty formatter with a feature that contained a rule. ([480](https://github.com/cucumber/godog/pull/480) - [dumpsterfireproject](https://github.com/dumpsterfireproject)) +- Multiple invocations of AfterScenario hooks in case of undefined steps. ([494](https://github.com/cucumber/godog/pull/494) - [vearutop](https://github.com/vearutop)) +- Add a check for missing test files and raise a more helpful error. ([468](https://github.com/cucumber/godog/pull/468) - [ALCooper12](https://github.com/ALCooper12)) +- Fix version subcommand. Do not print usage if run subcommand fails. ([475](https://github.com/cucumber/godog/pull/475) - [coopernurse](https://github.com/coopernurse)) + +### Added +- Add new option for created features with parsing from byte slices. ([476](https://github.com/cucumber/godog/pull/476) - [akaswenwilk](https://github.com/akaswenwilk)) + +### Deprecated +- `godog` CLI tool prints deprecation warning. ([489](https://github.com/cucumber/godog/pull/489) - [vearutop](https://github.com/vearutop)) + +## [v0.12.5] +### Changed +- Changed underlying cobra command setup to return errors instead of calling `os.Exit` directly to enable simpler testing. ([454](https://github.com/cucumber/godog/pull/454) - [mxygem](https://github.com/mxygem)) +- Remove use of deprecated methods from `_examples`. ([460](https://github.com/cucumber/godog/pull/460) - [ricardogarfe](https://github.com/ricardogarfe)) + +### Fixed +- Support for go1.18 in `godog` cli mode ([466](https://github.com/cucumber/godog/pull/466) - [vearutop](https://github.com/vearutop)) + +## [v0.12.4] +### Added +- Allow suite-level configuration of steps and hooks ([453](https://github.com/cucumber/godog/pull/453) - [vearutop](https://github.com/vearutop)) + +## [v0.12.3] +### Added +- Automated binary releases with GitHub Actions ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop)) +- Automated binary versioning with `go install` ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop)) +- Module with local replace in examples ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop)) + +### Changed +- suggest to use `go install` instead of the deprecated `go get` to install the `godog` binary ([449](https://github.com/cucumber/godog/pull/449) - [dmitris](https://github.com/dmitris)) + +### Fixed +- After Scenario hook is called before After Step ([444](https://github.com/cucumber/godog/pull/444) - [vearutop](https://github.com/vearutop)) +- `check-go-version` in Makefile to run on WSL. 
([443](https://github.com/cucumber/godog/pull/443) - [mxygem](https://github.com/mxygem)) + +## [v0.12.2] +### Fixed +- Error in `go mod tidy` with `GO111MODULE=off` ([436](https://github.com/cucumber/godog/pull/436) - [vearutop](https://github.com/vearutop)) + +## [v0.12.1] +### Fixed +- Unintended change of behavior in before step hook ([424](https://github.com/cucumber/godog/pull/424) - [nhatthm](https://github.com/nhatthm)) + +## [v0.12.0] +### Added +- Support for step definitions without return ([364](https://github.com/cucumber/godog/pull/364) - [titouanfreville](https://github.com/titouanfreville)) +- Contextualized hooks for scenarios and steps ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- Step result status in After hook ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- Support auto converting doc strings to plain strings ([380](https://github.com/cucumber/godog/pull/380) - [chirino](https://github.com/chirino)) +- Use multiple formatters in the same test run ([392](https://github.com/cucumber/godog/pull/392) - [vearutop](https://github.com/vearutop)) +- Added `RetrieveFeatures()` method to `godog.TestSuite` ([276](https://github.com/cucumber/godog/pull/276) - [radtriste](https://github.com/radtriste)) +- Added support to create custom formatters ([372](https://github.com/cucumber/godog/pull/372) - [leviable](https://github.com/leviable)) + +### Changed +- Upgraded gherkin-go to v19 and messages-go to v16 ([402](https://github.com/cucumber/godog/pull/402) - [mbow](https://github.com/mbow)) +- Generate simpler snippets that use *godog.DocString and *godog.Table ([379](https://github.com/cucumber/godog/pull/379) - [chirino](https://github.com/chirino)) + +### Deprecated +- `ScenarioContext.BeforeScenario`, use `ScenarioContext.Before` ([409](https://github.com/cucumber/godog/pull/409)) - [vearutop](https://github.com/vearutop)) +- `ScenarioContext.AfterScenario`, use `ScenarioContext.After` ([409](https://github.com/cucumber/godog/pull/409)) - [vearutop](https://github.com/vearutop)) +- `ScenarioContext.BeforeStep`, use `ScenarioContext.StepContext().Before` ([409](https://github.com/cucumber/godog/pull/409)) - [vearutop](https://github.com/vearutop)) +- `ScenarioContext.AfterStep`, use `ScenarioContext.StepContext().After` ([409](https://github.com/cucumber/godog/pull/409)) - [vearutop](https://github.com/vearutop)) + +### Fixed +- Incorrect step definition output for Data Tables ([411](https://github.com/cucumber/godog/pull/411) - [karfrank](https://github.com/karfrank)) +- `ScenarioContext.AfterStep` not invoked after a failed case ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop))) +- Can't execute multiple specific scenarios in the same feature file ([414](https://github.com/cucumber/godog/pull/414) - [vearutop](https://github.com/vearutop))) + +## [v0.11.0] +### Added +- Created a simple example for a custom formatter ([330](https://github.com/cucumber/godog/pull/330) - [lonnblad](https://github.com/lonnblad)) +- --format junit:result.xml will now write to result.xml ([331](https://github.com/cucumber/godog/pull/331) - [lonnblad](https://github.com/lonnblad)) +- Added make commands to create artifacts and upload them to a github release ([333](https://github.com/cucumber/godog/pull/333) - [lonnblad](https://github.com/lonnblad)) +- Created release notes and changelog for v0.11.0 ([355](https://github.com/cucumber/godog/pull/355) - 
[lonnblad](https://github.com/lonnblad)) +- Created v0.11.0-rc2 ([362](https://github.com/cucumber/godog/pull/362) - [lonnblad](https://github.com/lonnblad)) + +### Changed +- Added Cobra for the Command Line Interface ([321](https://github.com/cucumber/godog/pull/321) - [lonnblad](https://github.com/lonnblad)) +- Added internal packages for formatters, storage and models ([323](https://github.com/cucumber/godog/pull/323) - [lonnblad](https://github.com/lonnblad)) +- Added an internal package for tags filtering ([326](https://github.com/cucumber/godog/pull/326) - [lonnblad](https://github.com/lonnblad)) +- Added an internal pkg for the builder ([327](https://github.com/cucumber/godog/pull/327) - [lonnblad](https://github.com/lonnblad)) +- Moved the parser code to a new internal pkg ([329](https://github.com/cucumber/godog/pull/329) - [lonnblad](https://github.com/lonnblad)) +- Moved StepDefinition to the formatters pkg ([332](https://github.com/cucumber/godog/pull/332) - [lonnblad](https://github.com/lonnblad)) +- Removed go1.12 and added go1.15 to CI config ([356](https://github.com/cucumber/godog/pull/356) - [lonnblad](https://github.com/lonnblad)) + +### Fixed +- Improved the help text of the formatter flag in the run command ([347](https://github.com/cucumber/godog/pull/347) - [lonnblad](https://github.com/lonnblad)) +- Removed $GOPATH from the README.md and updated the example ([349](https://github.com/cucumber/godog/pull/349) - [lonnblad](https://github.com/lonnblad)) +- Fixed the undefined step definitions help ([350](https://github.com/cucumber/godog/pull/350) - [lonnblad](https://github.com/lonnblad)) +- Added a comment regarding running the examples within the $GOPATH ([352](https://github.com/cucumber/godog/pull/352) - [lonnblad](https://github.com/lonnblad)) +- doc(FAQ/TestMain): `testing.M.Run()` is optional ([353](https://github.com/cucumber/godog/pull/353) - [hansbogert](https://github.com/hansbogert)) +- Made a fix for the unstable Randomize Run tests ([354](https://github.com/cucumber/godog/pull/354) - [lonnblad](https://github.com/lonnblad)) +- Fixed an issue when go test is parsing command-line flags ([359](https://github.com/cucumber/godog/pull/359) - [lonnblad](https://github.com/lonnblad)) +- Make pickleStepIDs unique across multiple paths ([366](https://github.com/cucumber/godog/pull/366) - [rickardenglund](https://github.com/rickardenglund)) + +### Removed +- Removed deprecated code ([322](https://github.com/cucumber/godog/pull/322) - [lonnblad](https://github.com/lonnblad)) + +## [v0.10.0] +### Added +- Added concurrency support to the pretty formatter ([275](https://github.com/cucumber/godog/pull/275) - [lonnblad](https://github.com/lonnblad)) +- Added concurrency support to the events formatter ([274](https://github.com/cucumber/godog/pull/274) - [lonnblad](https://github.com/lonnblad)) +- Added concurrency support to the cucumber formatter ([273](https://github.com/cucumber/godog/pull/273) - [lonnblad](https://github.com/lonnblad)) +- Added an example for how to use assertion pkgs like testify with godog ([289](https://github.com/cucumber/godog/pull/289) - [lonnblad](https://github.com/lonnblad)) +- Added the new TestSuiteInitializer and ScenarioInitializer ([294](https://github.com/cucumber/godog/pull/294) - [lonnblad](https://github.com/lonnblad)) +- Added an in-mem storage for pickles ([304](https://github.com/cucumber/godog/pull/304) - [lonnblad](https://github.com/lonnblad)) +- Added Pickle and PickleStep results to the in-mem storage
([305](https://github.com/cucumber/godog/pull/305) - [lonnblad](https://github.com/lonnblad)) +- Added features to the in-mem storage ([306](https://github.com/cucumber/godog/pull/306) - [lonnblad](https://github.com/lonnblad)) +- Broke out some code from massive files into new files ([307](https://github.com/cucumber/godog/pull/307) - [lonnblad](https://github.com/lonnblad)) +- Added support for concurrent scenarios ([311](https://github.com/cucumber/godog/pull/311) - [lonnblad](https://github.com/lonnblad)) + +### Changed +- Broke out snippets gen and added sorting on method name ([271](https://github.com/cucumber/godog/pull/271) - [lonnblad](https://github.com/lonnblad)) +- Updated so that we run all tests concurrent now ([278](https://github.com/cucumber/godog/pull/278) - [lonnblad](https://github.com/lonnblad)) +- Moved fmt tests to a godog_test pkg and restructured the fmt output tests ([295](https://github.com/cucumber/godog/pull/295) - [lonnblad](https://github.com/lonnblad)) +- Moved builder tests to a godog_test pkg ([296](https://github.com/cucumber/godog/pull/296) - [lonnblad](https://github.com/lonnblad)) +- Made the builder tests run in parallel ([298](https://github.com/cucumber/godog/pull/298) - [lonnblad](https://github.com/lonnblad)) +- Refactored suite_context.go ([300](https://github.com/cucumber/godog/pull/300) - [lonnblad](https://github.com/lonnblad)) +- Added better testing of the Context Initializers and TestSuite{}.Run() ([301](https://github.com/cucumber/godog/pull/301) - [lonnblad](https://github.com/lonnblad)) +- Updated the README.md ([302](https://github.com/cucumber/godog/pull/302) - [lonnblad](https://github.com/lonnblad)) +- Unexported some exported properties in unexported structs ([303](https://github.com/cucumber/godog/pull/303) - [lonnblad](https://github.com/lonnblad)) +- Refactored some states in the formatters and feature struct ([310](https://github.com/cucumber/godog/pull/310) - [lonnblad](https://github.com/lonnblad)) + +### Deprecated +- Deprecated SuiteContext and ConcurrentFormatter ([314](https://github.com/cucumber/godog/pull/314) - [lonnblad](https://github.com/lonnblad)) + +### Fixed +- Fixed failing builder tests due to the v0.9.0 change ([lonnblad](https://github.com/lonnblad)) +- Update paths to screenshots for examples ([270](https://github.com/cucumber/godog/pull/270) - [leviable](https://github.com/leviable)) +- Made progress formatter verification a bit more accurate ([lonnblad](https://github.com/lonnblad)) +- Added comparison between single and multi threaded runs ([272](https://github.com/cucumber/godog/pull/272) - [lonnblad](https://github.com/lonnblad)) +- Fixed issue with empty feature file causing nil pointer deref ([288](https://github.com/cucumber/godog/pull/288) - [lonnblad](https://github.com/lonnblad)) +- Updated linting checks in circleci config and fixed linting issues ([290](https://github.com/cucumber/godog/pull/290) - [lonnblad](https://github.com/lonnblad)) +- Readded some legacy doc for FeatureContext ([297](https://github.com/cucumber/godog/pull/297) - [lonnblad](https://github.com/lonnblad)) +- Fixed an issue with calculating time for junit testsuite ([308](https://github.com/cucumber/godog/pull/308) - [lonnblad](https://github.com/lonnblad)) +- Fixed so that we don't execute features with zero scenarios ([315](https://github.com/cucumber/godog/pull/315) - [lonnblad](https://github.com/lonnblad)) +- Fixed the broken --random flag ([317](https://github.com/cucumber/godog/pull/317) - 
[lonnblad](https://github.com/lonnblad)) + +### Removed +- Removed pre go112 build code ([293](https://github.com/cucumber/godog/pull/293) - [lonnblad](https://github.com/lonnblad)) +- Removed the deprecated feature hooks ([312](https://github.com/cucumber/godog/pull/312) - [lonnblad](https://github.com/lonnblad)) + +## [0.9.0] +### Changed +- Run godog features in CircleCI in strict mode ([mxygem](https://github.com/mxygem)) +- Removed TestMain call in `suite_test.go` for CI. ([mxygem](https://github.com/mxygem)) +- Migrated to [gherkin-go - v11.0.0](https://github.com/cucumber/gherkin-go/releases/tag/v11.0.0). ([240](https://github.com/cucumber/godog/pull/240) - [lonnblad](https://github.com/lonnblad)) + +### Fixed +- Fixed the time attributes in the JUnit formatter. ([232](https://github.com/cucumber/godog/pull/232) - [lonnblad](https://github.com/lonnblad)) +- Re enable custom formatters. ([238](https://github.com/cucumber/godog/pull/238) - [ericmcbride](https://github.com/ericmcbride)) +- Added back suite_test.go ([mxygem](https://github.com/mxygem)) +- Normalise module paths for use on Windows ([242](https://github.com/cucumber/godog/pull/242) - [gjtaylor](https://github.com/gjtaylor)) +- Fixed panic in indenting function `s` ([247](https://github.com/cucumber/godog/pull/247) - [titouanfreville](https://github.com/titouanfreville)) +- Fixed wrong version in API example ([263](https://github.com/cucumber/godog/pull/263) - [denis-trofimov](https://github.com/denis-trofimov)) + +## [0.8.1] +### Added +- Link in Readme to the Slack community. ([210](https://github.com/cucumber/godog/pull/210) - [smikulcik](https://github.com/smikulcik)) +- Added run tests for Cucumber formatting. ([214](https://github.com/cucumber/godog/pull/214), [216](https://github.com/cucumber/godog/pull/216) - [lonnblad](https://github.com/lonnblad)) + +### Changed +- Renamed the `examples` directory to `_examples`, removing dependencies from the Go module ([218](https://github.com/cucumber/godog/pull/218) - [axw](https://github.com/axw)) + +### Fixed +- Find/Replaced references to DATA-DOG/godog -> cucumber/godog for docs. ([209](https://github.com/cucumber/godog/pull/209) - [smikulcik](https://github.com/smikulcik)) +- Fixed missing links in changelog to be correctly included! ([mxygem](https://github.com/mxygem)) + +## [0.8.0] +### Added +- Added initial CircleCI config. ([mxygem](https://github.com/mxygem)) +- Added concurrency support for JUnit formatting ([lonnblad](https://github.com/lonnblad)) + +### Changed +- Changed code references to DATA-DOG/godog to cucumber/godog to help get things building correctly. 
([mxygem](https://github.com/mxygem)) + +[v0.15.1]: https://github.com/cucumber/godog/compare/v0.15.0...v0.15.1 +[v0.15.0]: https://github.com/cucumber/godog/compare/v0.14.1...v0.15.0 +[v0.14.1]: https://github.com/cucumber/godog/compare/v0.14.0...v0.14.1 +[v0.14.0]: https://github.com/cucumber/godog/compare/v0.13.0...v0.14.0 +[v0.13.0]: https://github.com/cucumber/godog/compare/v0.12.6...v0.13.0 +[v0.12.6]: https://github.com/cucumber/godog/compare/v0.12.5...v0.12.6 +[v0.12.5]: https://github.com/cucumber/godog/compare/v0.12.4...v0.12.5 +[v0.12.4]: https://github.com/cucumber/godog/compare/v0.12.3...v0.12.4 +[v0.12.3]: https://github.com/cucumber/godog/compare/v0.12.2...v0.12.3 +[v0.12.2]: https://github.com/cucumber/godog/compare/v0.12.1...v0.12.2 +[v0.12.1]: https://github.com/cucumber/godog/compare/v0.12.0...v0.12.1 +[v0.12.0]: https://github.com/cucumber/godog/compare/v0.11.0...v0.12.0 +[v0.11.0]: https://github.com/cucumber/godog/compare/v0.10.0...v0.11.0 +[v0.10.0]: https://github.com/cucumber/godog/compare/v0.9.0...v0.10.0 +[0.9.0]: https://github.com/cucumber/godog/compare/v0.8.1...v0.9.0 +[0.8.1]: https://github.com/cucumber/godog/compare/v0.8.0...v0.8.1 +[0.8.0]: https://github.com/cucumber/godog/compare/v0.7.13...v0.8.0 diff --git a/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md b/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md new file mode 100644 index 000000000..070337965 --- /dev/null +++ b/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md @@ -0,0 +1,113 @@ +# Change LOG + +**2020-02-06** +- move to new [CHANGELOG.md](CHANGELOG.md) + +**2020-01-31** +- changed license to MIT and moved the project repository to the **cucumber** organization. + +**2018-11-16** +- added formatter output test suite; currently mainly the pretty format is tested. +- these tests helped to identify some output format issues. + +**2018-11-12** +- proper go module support added for `godog` command build. +- added build tests. + +**2018-10-27** +- support go1.11 new compiler and linker changes for **godog** command. +- support go1.11 modules and `go mod` builds. +- `BindFlags` now has a prefix option for flags, so that the `go test` command can avoid flag name collisions. +- `BindFlags` respects default options provided for binding, so that it does not override predefined options when flags are bound, see #144. +- Minor patch to support tag filters on example tables for ScenarioOutline. +- Minor patch for the pretty printer: when a scenario had no steps, the comment position computation panicked. + +**2018-03-04** +- support go1.10 new compiler and linker changes for **godog** command. + +**2017-08-31** +- added **BeforeFeature** and **AfterFeature** hooks. +- a failed multistep error is now prepended with the parent step text in order to determine the failed nested step. +- pretty format now removes the step definition location package name in the comment next to the step if the step definition matches the tested package. If the step definition is imported from another package, the full package name will be printed. + +**2017-05-04** +- added **--strict** option in order to fail the suite when there are pending or undefined steps. By default, the suite passes and treats pending or undefined steps as TODOs. + +**2017-04-29** - **v0.7.0** +- added support for nested steps. From now on, it is possible to return **godog.Steps** instead of an **error** in the step definition func. This change introduced a few minor changes in the **Formatter** interface. Be sure to adapt the changes if you have custom formatters.
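+
+ For illustration, a nested step definition simply returns the substeps to run; a minimal sketch, assuming the listed step texts are registered elsewhere in the suite:
+
+ ```go
+ // iHaveHadLunch delegates to two other registered steps;
+ // godog executes each listed step in order as a nested step.
+ func iHaveHadLunch() godog.Steps {
+ 	return godog.Steps{
+ 		"there are 12 godogs",
+ 		"I eat 5",
+ 	}
+ }
+ ```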
+ +**2017-04-27** +- added an option to randomize scenario execution order, so we could ensure that scenarios do not depend on global state. +- godog was manually sorting feature files by name. Now it just runs them in the given order; you may sort them any way you like. For example `godog $(find . -name '*.feature' | sort)` + +**2016-10-30** - **v0.6.0** +- added experimental **events** format; this might be used for unified cucumber formats, but it should not be adopted widely, since it is highly possible that the specification will change. +- added **RunWithOptions** method, which makes it easy to run godog from **TestMain** without needing to simulate flag arguments. These options now allow configuring the output writer. +- added flag **-o, --output=runner.binary** which only compiles the test runner executable, but does not execute it. +- **FlagSet** initialization now takes io.Writer as output for help text output. It was not showing nice colors on windows before. **--no-colors** option only applies to test run output. + +**2016-06-14** - **v0.5.0** +- godog now uses **go tool compile** and **go tool link** to support vendor directory dependencies. It also compiles the test executable the same way as the standard **go test** utility. With this change, only go versions from **1.5** are now supported. + +**2016-06-01** +- parse flags in main command, to show version and help without needing to compile the test package and buildable go sources. + +**2016-05-28** +- show nicely formatted called step func name and file path + +**2016-05-26** +- pack gherkin dependency in a subpackage to prevent compatibility conflicts in the future. If recently upgraded, you will probably need to reference gherkin as `github.com/DATA-DOG/godog/gherkin` instead. + +**2016-05-25** +- refactored test suite build tooling in order to use the standard **go test** tool, which allows compiling the package with the godog runner script in a **go** idiomatic way. It also supports all build environment options as usual. +- **godog.Run** now returns an **int** exit status. It was not returning anything before, so there are no compatibility breaks. + +**2016-03-04** +- added **junit** compatible output formatter, which prints **xml** results to **os.Stdout** +- fixed #14 which skipped printing background steps when there was a scenario outline in the feature. + +**2015-07-03** +- changed **godog.Suite** from an interface to a struct. Context registration should be updated accordingly. The reason for the change: since it exports the same methods and there is no need to mock a function in tests, there is no obvious reason to keep an interface. +- in order to support running suites concurrently, the entry point of the application needed to be refactored. The **Run** method is now a func of the godog package which initializes and runs the suite (or more suites). The **New** method is removed. This change made godog a little cleaner. +- renamed the **RegisterFormatter** func to **Format** to be more consistent. + diff --git a/vendor/github.com/cucumber/godog/CONTRIBUTING.md b/vendor/github.com/cucumber/godog/CONTRIBUTING.md new file mode 100644 index 000000000..c21ee4263 --- /dev/null +++ b/vendor/github.com/cucumber/godog/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# Welcome 💖 + +Before anything else, thank you for taking some of your precious time to help this project move forward. ❤️ + +If you're new to open source and feeling a bit nervous 😳, we understand!
We recommend watching [this excellent guide](https://egghead.io/talks/git-how-to-make-your-first-open-source-contribution) +to give you a grounding in some of the basic concepts. You could also watch [this talk](https://www.youtube.com/watch?v=tuSk6dMoTIs) from our very own wonderful [Marit van Dijk](https://github.com/mlvandijk) on her experiences contributing to Cucumber. + +We want you to feel safe to make mistakes, and ask questions. If anything in this guide or anywhere else in the codebase doesn't make sense to you, please let us know! It's through your feedback that we can make this codebase more welcoming, so we'll be glad to hear thoughts. + +You can chat with us in the `#committers` channel in our [community Discord](https://cucumber.io/docs/community/get-in-touch/#discord), or feel free to [raise an issue] if you're experiencing any friction trying make your contribution. + +## Setup + +To get your development environment set up, you'll need to [install Go]. We're currently using version 1.17 for development. + +Once that's done, try running the tests: + + make test + +If everything passes, you're ready to hack! + +[install go]: https://golang.org/doc/install +[community Discord]: https://cucumber.io/community#discord +[raise an issue]: https://github.com/cucumber/godog/issues/new/choose + +## Changing dependencies + +If dependencies have changed, you will also need to update the _examples module. `go mod tidy` should be sufficient. \ No newline at end of file diff --git a/vendor/github.com/cucumber/godog/LICENSE b/vendor/github.com/cucumber/godog/LICENSE new file mode 100644 index 000000000..97dcbd65f --- /dev/null +++ b/vendor/github.com/cucumber/godog/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) SmartBear + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cucumber/godog/Makefile b/vendor/github.com/cucumber/godog/Makefile new file mode 100644 index 000000000..06c95c4e0 --- /dev/null +++ b/vendor/github.com/cucumber/godog/Makefile @@ -0,0 +1,77 @@ +.PHONY: test gherkin bump cover + +VERS ?= $(shell git symbolic-ref -q --short HEAD || git describe --tags --exact-match) + +GO_MAJOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f1) +GO_MINOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' 
-f2) +MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1 +MINIMUM_SUPPORTED_GO_MINOR_VERSION = 16 +GO_VERSION_VALIDATION_ERR_MSG = Go version $(GO_MAJOR_VERSION).$(GO_MINOR_VERSION) is not supported, please update to at least $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION).$(MINIMUM_SUPPORTED_GO_MINOR_VERSION) + +.PHONY: check-go-version +check-go-version: + @if [ $(GO_MAJOR_VERSION) -gt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ + exit 0 ;\ + elif [ $(GO_MAJOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ + echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ + exit 1; \ + elif [ $(GO_MINOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) ] ; then \ + echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ + exit 1; \ + fi + +test: check-go-version + @echo "running all tests" + @go fmt ./... + @go run honnef.co/go/tools/cmd/staticcheck@v0.5.1 github.com/cucumber/godog + @go run honnef.co/go/tools/cmd/staticcheck@v0.5.1 github.com/cucumber/godog/cmd/godog + go vet ./... + go test -race ./... + go run ./cmd/godog -f progress -c 4 + +gherkin: + @if [ -z "$(VERS)" ]; then echo "Provide gherkin version like: 'VERS=commit-hash'"; exit 1; fi + @rm -rf gherkin + @mkdir gherkin + @curl -s -L https://github.com/cucumber/gherkin-go/tarball/$(VERS) | tar -C gherkin -zx --strip-components 1 + @rm -rf gherkin/{.travis.yml,.gitignore,*_test.go,gherkin-generate*,*.razor,*.jq,Makefile,CONTRIBUTING.md} + +bump: + @if [ -z "$(VERSION)" ]; then echo "Provide version like: 'VERSION=$(VERS) make bump'"; exit 1; fi + @echo "bumping version from: $(VERS) to $(VERSION)" + @sed -i.bak 's/$(VERS)/$(VERSION)/g' godog.go + @sed -i.bak 's/$(VERS)/$(VERSION)/g' _examples/api/features/version.feature + @find . -name '*.bak' | xargs rm + +cover: + go test -race -coverprofile=coverage.txt + go tool cover -html=coverage.txt + rm coverage.txt + +ARTIFACT_DIR := _artifacts + +# To upload artifacts for the current version; +# execute: make upload +# +# Check https://github.com/tcnksm/ghr for usage of ghr +upload: artifacts + ghr -replace $(VERS) $(ARTIFACT_DIR) + +# To build artifacts for the current version; +# execute: make artifacts +artifacts: + rm -rf $(ARTIFACT_DIR) + mkdir $(ARTIFACT_DIR) + + $(call _build,darwin,amd64) + $(call _build,linux,amd64) + $(call _build,linux,arm64) + +define _build + mkdir $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2 + env GOOS=$1 GOARCH=$2 go build -ldflags "-X github.com/cucumber/godog.Version=$(VERS)" -o $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/godog ./cmd/godog + cp README.md $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/README.md + cp LICENSE $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/LICENSE + cd $(ARTIFACT_DIR) && tar -c --use-compress-program="pigz --fast" -f godog-$(VERS)-$1-$2.tar.gz godog-$(VERS)-$1-$2 && cd .. 
+ rm -rf $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2 +endef diff --git a/vendor/github.com/cucumber/godog/README.md b/vendor/github.com/cucumber/godog/README.md new file mode 100644 index 000000000..dceacf167 --- /dev/null +++ b/vendor/github.com/cucumber/godog/README.md @@ -0,0 +1,583 @@ +[![#StandWithUkraine](https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/badges/StandWithUkraine.svg)](https://vshymanskyy.github.io/StandWithUkraine) +[![Build Status](https://github.com/cucumber/godog/workflows/test/badge.svg)](https://github.com/cucumber/godog/actions?query=branch%main+workflow%3Atest) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/cucumber/godog)](https://pkg.go.dev/github.com/cucumber/godog) +[![codecov](https://codecov.io/gh/cucumber/godog/branch/master/graph/badge.svg)](https://codecov.io/gh/cucumber/godog) +[![pull requests](https://oselvar.com/api/badge?label=pull%20requests&csvUrl=https%3A%2F%2Fraw.githubusercontent.com%2Fcucumber%2Foselvar-github-metrics%2Fmain%2Fdata%2Fcucumber%2Fgodog%2FpullRequests.csv)](https://oselvar.com/github/cucumber/oselvar-github-metrics/main/cucumber/godog) +[![issues](https://oselvar.com/api/badge?label=issues&csvUrl=https%3A%2F%2Fraw.githubusercontent.com%2Fcucumber%2Foselvar-github-metrics%2Fmain%2Fdata%2Fcucumber%2Fgodog%2Fissues.csv)](https://oselvar.com/github/cucumber/oselvar-github-metrics/main/cucumber/godog) + +# Godog + +

+[Godog logo]

+ +**The API is likely to change a few times before we reach 1.0.0** + +Please read the full README; you may find it very useful. And do not forget to peek into the [Release Notes](https://github.com/cucumber/godog/blob/master/release-notes) and the [CHANGELOG](https://github.com/cucumber/godog/blob/master/CHANGELOG.md) from time to time. + +Package godog is the official Cucumber BDD framework for Golang. It merges specification and test documentation into one cohesive whole, using Gherkin-formatted scenarios in the format of Given, When, Then. + +The project was inspired by [behat][behat] and [cucumber][cucumber]. + +## Why Godog/Cucumber + +### A single source of truth + +Godog merges specification and test documentation into one cohesive whole. + +### Living documentation + +Because they're automatically tested by Godog, your specifications are always up to date. + +### Focus on the customer + +Business and IT don't always understand each other. Godog's executable specifications encourage closer collaboration, helping teams keep the business goal in mind at all times. + +### Less rework + +When automated testing is this much fun, teams can easily protect themselves from costly regressions. + +### Read more +- [Behaviour-Driven Development](https://cucumber.io/docs/bdd/) +- [Gherkin Reference](https://cucumber.io/docs/gherkin/reference/) + +## Contributions + +Godog is a community-driven Open Source Project within the Cucumber organization. We [welcome contributions from everyone](https://cucumber.io/blog/open-source/tackling-structural-racism-(and-sexism)-in-open-so/), and we're ready to support you if you have the enthusiasm to contribute. + +See the [contributing guide] for more detail on how to get started. + +See the [releasing guide] for release flow details. + +## Getting help + +We have a [community Discord](https://cucumber.io/docs/community/get-in-touch/#discord) where you can chat with other users, developers, and BDD practitioners. + +## Examples + +You can find a few examples [here](/_examples). + +**Note** that if you want to execute any of the examples and have the Git repository checked out in the `$GOPATH`, you need to use: `GO111MODULE=off`. See this [issue](https://github.com/cucumber/godog/issues/344) for reference. + +### Godogs + +The following example can be [found here](/_examples/godogs). + +#### Step 1 - Set up a go module + +Create a new go module named **godogs** in your go workspace by running `mkdir godogs` + +From now on, use **godogs** as your working directory by running `cd godogs` + +Initialize the go module inside the **godogs** directory by running `go mod init godogs` + +#### Step 2 - Create gherkin feature + +Imagine we have a **godog cart** to serve godogs for lunch. + +First of all, we describe our feature in plain text: + +``` gherkin +Feature: eat godogs + In order to be happy + As a hungry gopher + I need to be able to eat godogs + + Scenario: Eat 5 out of 12 + Given there are 12 godogs + When I eat 5 + Then there should be 7 remaining +``` + +Run `vim features/godogs.feature`, add the text above into the vim editor, and save the file. + +#### Step 3 - Create godog step definitions + +**NOTE:** Same as **go test**, godog respects package level isolation. All your step definitions should be in your tested package root directory. In this case: **godogs**.
+ +Create and copy the step definitions below into a new file by running `vim godogs_test.go`: +``` go +package main + +import "github.com/cucumber/godog" + +func iEat(arg1 int) error { + return godog.ErrPending +} + +func thereAreGodogs(arg1 int) error { + return godog.ErrPending +} + +func thereShouldBeRemaining(arg1 int) error { + return godog.ErrPending +} + +func InitializeScenario(ctx *godog.ScenarioContext) { + ctx.Step(`^there are (\d+) godogs$`, thereAreGodogs) + ctx.Step(`^I eat (\d+)$`, iEat) + ctx.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining) +} +``` + +Alternatively, you can also specify the keyword (Given, When, Then...) when creating the step definitions: +``` go +func InitializeScenario(ctx *godog.ScenarioContext) { + ctx.Given(`^there are (\d+) godogs$`, thereAreGodogs) + ctx.When(`^I eat (\d+)$`, iEat) + ctx.Then(`^there should be (\d+) remaining$`, thereShouldBeRemaining) +} +``` + +Our module should now look like this: +``` +godogs +- features + - godogs.feature +- go.mod +- go.sum +- godogs_test.go +``` + +Run `go test` in the **godogs** directory to run the steps you have defined. You should now see that the scenario runs +with a warning stating there are no tests to run. +``` +testing: warning: no tests to run +PASS +ok godogs 0.225s +``` + +By adding some logic to these steps, you will be able to thoroughly test the feature you just defined. + +#### Step 4 - Create the main program to test + +Let's keep it simple by only requiring an amount of **godogs** for now. + +Create and copy the code below into a new file by running `vim godogs.go` +```go +package main + +// Godogs available to eat +var Godogs int + +func main() { /* usual main func */ } +``` + +Our module should now look like this: +``` +godogs +- features + - godogs.feature +- go.mod +- go.sum +- godogs.go +- godogs_test.go +``` + +#### Step 5 - Add some logic to the step definitions + +Now lets implement our step definitions to test our feature requirements. + +Replace the contents of `godogs_test.go` with the code below by running `vim godogs_test.go`. + +```go +package main + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/cucumber/godog" +) + +// godogsCtxKey is the key used to store the available godogs in the context.Context. +type godogsCtxKey struct{} + +func thereAreGodogs(ctx context.Context, available int) (context.Context, error) { + return context.WithValue(ctx, godogsCtxKey{}, available), nil +} + +func iEat(ctx context.Context, num int) (context.Context, error) { + available, ok := ctx.Value(godogsCtxKey{}).(int) + if !ok { + return ctx, errors.New("there are no godogs available") + } + + if available < num { + return ctx, fmt.Errorf("you cannot eat %d godogs, there are %d available", num, available) + } + + available -= num + + return context.WithValue(ctx, godogsCtxKey{}, available), nil +} + +func thereShouldBeRemaining(ctx context.Context, remaining int) error { + available, ok := ctx.Value(godogsCtxKey{}).(int) + if !ok { + return errors.New("there are no godogs available") + } + + if available != remaining { + return fmt.Errorf("expected %d godogs to be remaining, but there is %d", remaining, available) + } + + return nil +} + +func TestFeatures(t *testing.T) { + suite := godog.TestSuite{ + ScenarioInitializer: InitializeScenario, + Options: &godog.Options{ + Format: "pretty", + Paths: []string{"features"}, + TestingT: t, // Testing instance that will run subtests. 
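+			// godog.Options has further fields, e.g. Tags (filter scenarios by
+			// tag expression), Strict (fail on pending or undefined steps) and
+			// Randomize (shuffle scenario execution order); see the TestMain
+			// examples later in this README and this package's flag definitions.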
+		},
+	}
+
+	if suite.Run() != 0 {
+		t.Fatal("non-zero status returned, failed to run feature tests")
+	}
+}
+
+func InitializeScenario(sc *godog.ScenarioContext) {
+	sc.Step(`^there are (\d+) godogs$`, thereAreGodogs)
+	sc.Step(`^I eat (\d+)$`, iEat)
+	sc.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```
+
+In this example, we are using `context.Context` to pass state between the steps.
+Every scenario starts with an empty context, and then steps and hooks can add relevant information to it.
+The instrumented context is chained through the steps and hooks and is safe to use when multiple scenarios are running concurrently.
+
+When you run godog again with `go test -v godogs_test.go`, you should see a passing run:
+```
+=== RUN   TestFeatures
+Feature: eat godogs
+  In order to be happy
+  As a hungry gopher
+  I need to be able to eat godogs
+=== RUN   TestFeatures/Eat_5_out_of_12
+
+  Scenario: Eat 5 out of 12          # features/godogs.feature:6
+    Given there are 12 godogs        # godog_test.go:15 -> command-line-arguments.thereAreGodogs
+    When I eat 5                     # godog_test.go:19 -> command-line-arguments.iEat
+    Then there should be 7 remaining # godog_test.go:34 -> command-line-arguments.thereShouldBeRemaining
+
+1 scenarios (1 passed)
+3 steps (3 passed)
+279.917µs
+--- PASS: TestFeatures (0.00s)
+    --- PASS: TestFeatures/Eat_5_out_of_12 (0.00s)
+PASS
+ok  command-line-arguments 0.164s
+```
+
+You may hook into the `ScenarioContext` **Before** event in order to reset or pre-seed the application state before each scenario.
+You may also hook into more events, such as `sc.StepContext()` **After** to print all state in case of an error, or **BeforeSuite** to prepare a database; a minimal hook sketch follows the Documentation pointers below.
+
+By now, you should have figured out how to use **godog**. Another piece of advice: make steps orthogonal, small, and simple to read for their intended user. Whether that user is a non-technical website user or an API developer who understands a little more technical context, the steps should target that audience.
+
+When steps are orthogonal and small, you can combine them just like you do with Unix tools. Look for steps that can be simplified or removed by composing others.
+
+`TestFeatures` acts as a regular Go test, so you can leverage your IDE facilities to run and debug it.
+
+### Attachments
+
+An example showing how to make attachments (aka embeddings) to the results is shown in [_examples/attachments](/_examples/attachments/).
+
+## Code of Conduct
+
+Everyone interacting in this codebase and issue tracker is expected to follow the Cucumber [code of conduct](https://github.com/cucumber/cucumber/blob/master/CODE_OF_CONDUCT.md).
+
+## References and Tutorials
+
+- [cucumber-html-reporter](https://github.com/gkushang/cucumber-html-reporter),
+  may be used in order to generate **html** reports together with the **cucumber** output formatter. See the [following docker image](https://github.com/myie/cucumber-html-reporter) for usage details.
+- [how to use godog by semaphoreci](https://semaphoreci.com/community/tutorials/how-to-use-godog-for-behavior-driven-development-in-go)
+- see [examples](https://github.com/cucumber/godog/tree/master/_examples)
+- see extension [AssistDog](https://github.com/hellomd/assistdog),
+  which may have useful **gherkin.DataTable** transformations or comparison methods for assertions.
+
+## Documentation
+
+See [pkg documentation][godoc] for general API details.
+See **[Circle Config](/.circleci/config.yml)** for supported **go** versions.
+See `godog -h` for general command options.
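+
+For the **Before** hook mentioned above, registration looks roughly like this
+(a minimal sketch assuming the godog v0.12+ hook API; the reset logic is
+whatever your application needs):
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/cucumber/godog"
+)
+
+func InitializeScenario(sc *godog.ScenarioContext) {
+	sc.Before(func(ctx context.Context, s *godog.Scenario) (context.Context, error) {
+		// reset or pre-seed application state before each scenario
+		return ctx, nil
+	})
+
+	// step registrations as shown above...
+}
+```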
+
+See implementation examples:
+
+- [rest API server](/_examples/api)
+- [rest API with Database](/_examples/db)
+- [godogs](/_examples/godogs)
+
+## FAQ
+
+### Running Godog with go test
+
+You may integrate running **godog** in your **go test** command.
+
+#### Subtests of *testing.T
+
+You can run the test suite using go [Subtests](https://pkg.go.dev/testing#hdr-Subtests_and_Sub_benchmarks).
+In this case it is not necessary to have the **godog** command installed. See the following example.
+
+```go
+package main_test
+
+import (
+	"testing"
+
+	"github.com/cucumber/godog"
+)
+
+func TestFeatures(t *testing.T) {
+	suite := godog.TestSuite{
+		ScenarioInitializer: func(s *godog.ScenarioContext) {
+			// Add step definitions here.
+		},
+		Options: &godog.Options{
+			Format:   "pretty",
+			Paths:    []string{"features"},
+			TestingT: t, // Testing instance that will run subtests.
+		},
+	}
+
+	if suite.Run() != 0 {
+		t.Fatal("non-zero status returned, failed to run feature tests")
+	}
+}
+```
+
+Then you can run the suite:
+```
+go test -test.v -test.run ^TestFeatures$
+```
+
+Or a particular scenario:
+```
+go test -test.v -test.run ^TestFeatures$/^my_scenario$
+```
+
+#### TestMain
+
+You can run the test suite using the go [TestMain](https://golang.org/pkg/testing/#hdr-Main) func, available since **go 1.4**.
+In this case it is not necessary to have the **godog** command installed. See the following examples.
+
+The following example binds **godog** flags with the specified prefix `godog` in order to prevent flag collisions.
+
+```go
+package main
+
+import (
+	"os"
+	"testing"
+
+	"github.com/cucumber/godog"
+	"github.com/cucumber/godog/colors"
+	"github.com/spf13/pflag" // godog v0.11.0 and later
+)
+
+var opts = godog.Options{
+	Output: colors.Colored(os.Stdout),
+	Format: "progress", // can define default values
+}
+
+func init() {
+	godog.BindFlags("godog.", pflag.CommandLine, &opts) // godog v0.10.0 and earlier
+	godog.BindCommandLineFlags("godog.", &opts)         // godog v0.11.0 and later
+}
+
+func TestMain(m *testing.M) {
+	pflag.Parse()
+	opts.Paths = pflag.Args()
+
+	status := godog.TestSuite{
+		Name:                 "godogs",
+		TestSuiteInitializer: InitializeTestSuite,
+		ScenarioInitializer:  InitializeScenario,
+		Options:              &opts,
+	}.Run()
+
+	// Optional: Run `testing` package's logic besides godog.
+	if st := m.Run(); st > status {
+		status = st
+	}
+
+	os.Exit(status)
+}
+```
+
+Then you may run the tests, specifying flags in order to filter features:
+
+```
+go test -v --godog.random --godog.tags=wip
+go test -v --godog.format=pretty --godog.random -race -coverprofile=coverage.txt -covermode=atomic
+```
+
+The following example does not bind godog flags; instead it manually configures the needed options.
+
+```go
+func TestMain(m *testing.M) {
+	opts := godog.Options{
+		Format:    "progress",
+		Paths:     []string{"features"},
+		Randomize: time.Now().UTC().UnixNano(), // randomize scenario execution order
+	}
+
+	status := godog.TestSuite{
+		Name:                 "godogs",
+		TestSuiteInitializer: InitializeTestSuite,
+		ScenarioInitializer:  InitializeScenario,
+		Options:              &opts,
+	}.Run()
+
+	// Optional: Run `testing` package's logic besides godog.
+	if st := m.Run(); st > status {
+		status = st
+	}
+
+	os.Exit(status)
+}
+```
+
+You can even go one step further and reuse **go test** flags, like **verbose** mode, in order to switch the godog **format**.
+See the following example:
+
+```go
+func TestMain(m *testing.M) {
+	format := "progress"
+	for _, arg := range os.Args[1:] {
+		if arg == "-test.v=true" { // go test transforms -v option
+			format = "pretty"
+			break
+		}
+	}
+
+	opts := godog.Options{
+		Format: format,
+		Paths:  []string{"features"},
+	}
+
+	status := godog.TestSuite{
+		Name:                 "godogs",
+		TestSuiteInitializer: InitializeTestSuite,
+		ScenarioInitializer:  InitializeScenario,
+		Options:              &opts,
+	}.Run()
+
+	// Optional: Run `testing` package's logic besides godog.
+	if st := m.Run(); st > status {
+		status = st
+	}
+
+	os.Exit(status)
+}
+```
+
+Now when running `go test -v`, it will use the **pretty** format.
+
+### Tags
+
+If you want to filter scenarios by tags, you can use the `-t=<expression>` or `--tags=<expression>` option, where `<expression>` is one of the following:
+
+- `@wip` - run all scenarios with the wip tag
+- `~@wip` - exclude all scenarios with the wip tag
+- `@wip && ~@new` - run wip scenarios, but exclude new
+- `@wip,@undone` - run wip or undone scenarios
+
+### Using assertion packages like testify with Godog
+A more extensive example can be [found here](/_examples/assert-godogs).
+
+```go
+func thereShouldBeRemaining(ctx context.Context, remaining int) error {
+	assert.Equal(
+		godog.T(ctx), Godogs, remaining,
+		"Expected %d godogs to be remaining, but there is %d", remaining, Godogs,
+	)
+	return nil
+}
+```
+
+### Embeds
+
+If you're looking to compile your test binary in advance of running, you can compile the feature files into the binary via `go:embed`:
+
+```go
+//go:embed features/*
+var features embed.FS
+
+var opts = godog.Options{
+	Paths: []string{"features"},
+	FS:    features,
+}
+```
+
+Now the test binary can be compiled with all feature files embedded, and can be run independently of the feature files:
+
+```sh
+> go test -c ./test/integration/integration_test.go
+> mv integration.test /some/random/dir
+> cd /some/random/dir
+> ./integration.test
+```
+
+**NOTE:** `godog.Options.FS` is an `fs.FS`, so custom filesystem loaders can be used.
+
+## CLI Mode
+
+**NOTE:** The [`godog` CLI has been deprecated](https://github.com/cucumber/godog/discussions/478). It is recommended to use `go test` instead.
+
+Another way to use `godog` is to run it in CLI mode.
+
+In this mode, the `godog` CLI will use `go` under the hood to compile and run your test suite.
+
+**Godog** does not intervene with the standard **go test** command behavior. You can leverage both frameworks to functionally test your application while maintaining all test-related source code in **_test.go** files.
+
+**Godog** acts similarly to the **go test** command, using the go compiler and linker tool in order to produce a test executable. Godog contexts need to be exported the same way as **Test** functions for go tests. Note that if you use the **godog** command tool, it will use the `go` executable to determine the compiler and linker.
+
+### Install
+```
+go install github.com/cucumber/godog/cmd/godog@latest
+```
+Adding `@v0.12.0` will install v0.12.0 specifically instead of master.
+
+With `go` versions prior to 1.17, use `go get github.com/cucumber/godog/cmd/godog@v0.12.0`.
+Running within the `$GOPATH`, you would also need to set `GO111MODULE=on`, like this:
+```
+GO111MODULE=on go get github.com/cucumber/godog/cmd/godog@v0.12.0
+```
+
+### Configure common options for godog CLI
+
+There are no global options or configuration files.
Alias your common or project based commands: `alias godog-wip="godog --format=progress --tags=@wip"` + +## Concurrency + +When concurrency is configured in options, godog will execute the scenarios concurrently, which is supported by all supplied formatters. + +In order to support concurrency well, you should reset the state and isolate each scenario. They should not share any state. It is suggested to run the suite concurrently in order to make sure there is no state corruption or race conditions in the application. + +It is also useful to randomize the order of scenario execution, which you can now do with `--random` command option or `godog.Options.Randomize` setting. + +### Building your own custom formatter +A simple example can be [found here](/_examples/custom-formatter). + +## License +**Godog** and **Gherkin** are licensed under the [MIT][license] and developed as a part of the [cucumber project][cucumber] + +[godoc]: https://pkg.go.dev/github.com/cucumber/godog "Documentation on godog" +[golang]: https://golang.org/ "GO programming language" +[behat]: http://docs.behat.org/ "Behavior driven development framework for PHP" +[cucumber]: https://cucumber.io/ "Behavior driven development framework" +[license]: https://en.wikipedia.org/wiki/MIT_License "The MIT license" +[contributing guide]: https://github.com/cucumber/godog/blob/main/CONTRIBUTING.md +[releasing guide]: https://github.com/cucumber/godog/blob/main/RELEASING.md +[community Discord]: https://cucumber.io/community#discord + + + diff --git a/vendor/github.com/cucumber/godog/RELEASING.md b/vendor/github.com/cucumber/godog/RELEASING.md new file mode 100644 index 000000000..cba243657 --- /dev/null +++ b/vendor/github.com/cucumber/godog/RELEASING.md @@ -0,0 +1,67 @@ +# Releasing Guidelines for Cucumber Godog + +This document provides guidelines for releasing new versions of Cucumber Godog. Follow these steps to ensure a smooth and consistent release process. + +## Versioning + +Cucumber Godog follows [Semantic Versioning]. Version numbers are in the format `MAJOR.MINOR.PATCH`. + +### Current (for v0.MINOR.PATCH) + +- **MINOR**: Incompatible API changes. +- **PATCH**: Backward-compatible new features and bug fixes. + +### After v1.X.X release + +- **MAJOR**: Incompatible API changes. +- **MINOR**: Backward-compatible new features. +- **PATCH**: Backward-compatible bug fixes. + +## Release Process + +1. **Update Changelog:** + - Open `CHANGELOG.md` and add an entry for the upcoming release formatting according to the principles of [Keep A CHANGELOG]. + - Include details about new features, enhancements, and bug fixes. + +2. **Run Tests:** + - Run the test suite to ensure all existing features are working as expected. + +3. **Manual Testing for Backwards Compatibility:** + - Manually test the new release with external libraries that depend on Cucumber Godog. + - Look for any potential backwards compatibility issues, especially with widely-used libraries. + - Address any identified issues before proceeding. + +4. **Create Release on GitHub:** + - Go to the [Releases] page on GitHub. + - Click on "Draft a new release." + - Tag version should be set to the new tag vMAJOR.MINOR.PATCH + - Title the release using the version number (e.g., "vMAJOR.MINOR.PATCH"). + - Click 'Generate release notes' + +5. **Publish Release:** + - Click "Publish release" to make the release public. + +6. **Announce the Release:** + - Make an announcement on relevant communication channels (e.g., [community Discord]) about the new release. 
+ +## Additional Considerations + +- **Documentation:** + - Update the project documentation on the [website], if applicable. + +- **Deprecation Notices:** + - If any features are deprecated, clearly document them in the release notes and provide guidance on migration. + +- **Compatibility:** + - Clearly state any compatibility requirements or changes in the release notes. + +- **Feedback:** + - Encourage users to provide feedback and report any issues with the new release. + +Following these guidelines, including manual testing with external libraries, will help ensure a thorough release process for Cucumber Godog, allowing detection and resolution of potential backwards compatibility issues before tagging the release. + +[community Discord]: https://cucumber.io/community#discord +[website]: https://cucumber.github.io/godog/ +[Releases]: https://github.com/cucumber/godog/releases +[Semantic Versioning]: http://semver.org +[Keep A CHANGELOG]: http://keepachangelog.com \ No newline at end of file diff --git a/vendor/github.com/cucumber/godog/codecov.yml b/vendor/github.com/cucumber/godog/codecov.yml new file mode 100644 index 000000000..1418fc73d --- /dev/null +++ b/vendor/github.com/cucumber/godog/codecov.yml @@ -0,0 +1,8 @@ +coverage: + status: + project: + default: + threshold: 0.5% + patch: + default: + threshold: 0.5% diff --git a/vendor/github.com/cucumber/godog/colors/ansi_others.go b/vendor/github.com/cucumber/godog/colors/ansi_others.go new file mode 100644 index 000000000..6a166079f --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/ansi_others.go @@ -0,0 +1,19 @@ +// Copyright 2014 shiena Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +//go:build !windows +// +build !windows + +package colors + +import "io" + +type ansiColorWriter struct { + w io.Writer + mode outputMode +} + +func (cw *ansiColorWriter) Write(p []byte) (int, error) { + return cw.w.Write(p) +} diff --git a/vendor/github.com/cucumber/godog/colors/ansi_windows.go b/vendor/github.com/cucumber/godog/colors/ansi_windows.go new file mode 100644 index 000000000..8a92c8223 --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/ansi_windows.go @@ -0,0 +1,418 @@ +// Copyright 2014 shiena Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
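+//
+// This file provides the Windows implementation of ansiColorWriter: it parses
+// ANSI SGR escape sequences out of the stream and applies them via the
+// SetConsoleTextAttribute console API instead of writing the raw codes.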
+ +//go:build windows +// +build windows + +package colors + +import ( + "bytes" + "io" + "strings" + "syscall" + "unsafe" +) + +type csiState int + +const ( + outsideCsiCode csiState = iota + firstCsiCode + secondCsiCode +) + +type parseResult int + +const ( + noConsole parseResult = iota + changedColor + unknown +) + +type ansiColorWriter struct { + w io.Writer + mode outputMode + state csiState + paramStartBuf bytes.Buffer + paramBuf bytes.Buffer +} + +const ( + firstCsiChar byte = '\x1b' + secondeCsiChar byte = '[' + separatorChar byte = ';' + sgrCode byte = 'm' +) + +const ( + foregroundBlue = uint16(0x0001) + foregroundGreen = uint16(0x0002) + foregroundRed = uint16(0x0004) + foregroundIntensity = uint16(0x0008) + backgroundBlue = uint16(0x0010) + backgroundGreen = uint16(0x0020) + backgroundRed = uint16(0x0040) + backgroundIntensity = uint16(0x0080) + underscore = uint16(0x8000) + + foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity + backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity +) + +const ( + ansiReset = "0" + ansiIntensityOn = "1" + ansiIntensityOff = "21" + ansiUnderlineOn = "4" + ansiUnderlineOff = "24" + ansiBlinkOn = "5" + ansiBlinkOff = "25" + + ansiForegroundBlack = "30" + ansiForegroundRed = "31" + ansiForegroundGreen = "32" + ansiForegroundYellow = "33" + ansiForegroundBlue = "34" + ansiForegroundMagenta = "35" + ansiForegroundCyan = "36" + ansiForegroundWhite = "37" + ansiForegroundDefault = "39" + + ansiBackgroundBlack = "40" + ansiBackgroundRed = "41" + ansiBackgroundGreen = "42" + ansiBackgroundYellow = "43" + ansiBackgroundBlue = "44" + ansiBackgroundMagenta = "45" + ansiBackgroundCyan = "46" + ansiBackgroundWhite = "47" + ansiBackgroundDefault = "49" + + ansiLightForegroundGray = "90" + ansiLightForegroundRed = "91" + ansiLightForegroundGreen = "92" + ansiLightForegroundYellow = "93" + ansiLightForegroundBlue = "94" + ansiLightForegroundMagenta = "95" + ansiLightForegroundCyan = "96" + ansiLightForegroundWhite = "97" + + ansiLightBackgroundGray = "100" + ansiLightBackgroundRed = "101" + ansiLightBackgroundGreen = "102" + ansiLightBackgroundYellow = "103" + ansiLightBackgroundBlue = "104" + ansiLightBackgroundMagenta = "105" + ansiLightBackgroundCyan = "106" + ansiLightBackgroundWhite = "107" +) + +type drawType int + +const ( + foreground drawType = iota + background +) + +type winColor struct { + code uint16 + drawType drawType +} + +var colorMap = map[string]winColor{ + ansiForegroundBlack: {0, foreground}, + ansiForegroundRed: {foregroundRed, foreground}, + ansiForegroundGreen: {foregroundGreen, foreground}, + ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground}, + ansiForegroundBlue: {foregroundBlue, foreground}, + ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground}, + ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground}, + ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, + ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, + + ansiBackgroundBlack: {0, background}, + ansiBackgroundRed: {backgroundRed, background}, + ansiBackgroundGreen: {backgroundGreen, background}, + ansiBackgroundYellow: {backgroundRed | backgroundGreen, background}, + ansiBackgroundBlue: {backgroundBlue, background}, + ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background}, + ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background}, + ansiBackgroundWhite: {backgroundRed | 
backgroundGreen | backgroundBlue, background}, + ansiBackgroundDefault: {0, background}, + + ansiLightForegroundGray: {foregroundIntensity, foreground}, + ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground}, + ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground}, + ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground}, + ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground}, + ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground}, + ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground}, + ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground}, + + ansiLightBackgroundGray: {backgroundIntensity, background}, + ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background}, + ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background}, + ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background}, + ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background}, + ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background}, + ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background}, + ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background}, +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + defaultAttr *textAttributes +) + +func init() { + screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) + if screenInfo != nil { + colorMap[ansiForegroundDefault] = winColor{ + screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue), + foreground, + } + colorMap[ansiBackgroundDefault] = winColor{ + screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue), + background, + } + defaultAttr = convertTextAttr(screenInfo.WAttributes) + } +} + +type coord struct { + X, Y int16 +} + +type smallRect struct { + Left, Top, Right, Bottom int16 +} + +type consoleScreenBufferInfo struct { + DwSize coord + DwCursorPosition coord + WAttributes uint16 + SrWindow smallRect + DwMaximumWindowSize coord +} + +func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo { + var csbi consoleScreenBufferInfo + ret, _, _ := procGetConsoleScreenBufferInfo.Call( + hConsoleOutput, + uintptr(unsafe.Pointer(&csbi))) + if ret == 0 { + return nil + } + return &csbi +} + +func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool { + ret, _, _ := procSetConsoleTextAttribute.Call( + hConsoleOutput, + uintptr(wAttributes)) + return ret != 0 +} + +type textAttributes struct { + foregroundColor uint16 + backgroundColor uint16 + foregroundIntensity uint16 + backgroundIntensity uint16 + underscore uint16 + otherAttributes uint16 +} + +func convertTextAttr(winAttr uint16) *textAttributes { + fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue) + bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue) + fgIntensity := winAttr & foregroundIntensity + bgIntensity := winAttr & backgroundIntensity + underline := winAttr & underscore + otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore) + return 
&textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes} +} + +func convertWinAttr(textAttr *textAttributes) uint16 { + var winAttr uint16 + winAttr |= textAttr.foregroundColor + winAttr |= textAttr.backgroundColor + winAttr |= textAttr.foregroundIntensity + winAttr |= textAttr.backgroundIntensity + winAttr |= textAttr.underscore + winAttr |= textAttr.otherAttributes + return winAttr +} + +func changeColor(param []byte) parseResult { + screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) + if screenInfo == nil { + return noConsole + } + + winAttr := convertTextAttr(screenInfo.WAttributes) + strParam := string(param) + if len(strParam) <= 0 { + strParam = "0" + } + csiParam := strings.Split(strParam, string(separatorChar)) + for _, p := range csiParam { + c, ok := colorMap[p] + switch { + case !ok: + switch p { + case ansiReset: + winAttr.foregroundColor = defaultAttr.foregroundColor + winAttr.backgroundColor = defaultAttr.backgroundColor + winAttr.foregroundIntensity = defaultAttr.foregroundIntensity + winAttr.backgroundIntensity = defaultAttr.backgroundIntensity + winAttr.underscore = 0 + winAttr.otherAttributes = 0 + case ansiIntensityOn: + winAttr.foregroundIntensity = foregroundIntensity + case ansiIntensityOff: + winAttr.foregroundIntensity = 0 + case ansiUnderlineOn: + winAttr.underscore = underscore + case ansiUnderlineOff: + winAttr.underscore = 0 + case ansiBlinkOn: + winAttr.backgroundIntensity = backgroundIntensity + case ansiBlinkOff: + winAttr.backgroundIntensity = 0 + default: + // unknown code + } + case c.drawType == foreground: + winAttr.foregroundColor = c.code + case c.drawType == background: + winAttr.backgroundColor = c.code + } + } + winTextAttribute := convertWinAttr(winAttr) + setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute) + + return changedColor +} + +func parseEscapeSequence(command byte, param []byte) parseResult { + if defaultAttr == nil { + return noConsole + } + + switch command { + case sgrCode: + return changeColor(param) + default: + return unknown + } +} + +func (cw *ansiColorWriter) flushBuffer() (int, error) { + return cw.flushTo(cw.w) +} + +func (cw *ansiColorWriter) resetBuffer() (int, error) { + return cw.flushTo(nil) +} + +func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) { + var n1, n2 int + var err error + + startBytes := cw.paramStartBuf.Bytes() + cw.paramStartBuf.Reset() + if w != nil { + n1, err = cw.w.Write(startBytes) + if err != nil { + return n1, err + } + } else { + n1 = len(startBytes) + } + paramBytes := cw.paramBuf.Bytes() + cw.paramBuf.Reset() + if w != nil { + n2, err = cw.w.Write(paramBytes) + if err != nil { + return n1 + n2, err + } + } else { + n2 = len(paramBytes) + } + return n1 + n2, nil +} + +func isParameterChar(b byte) bool { + return ('0' <= b && b <= '9') || b == separatorChar +} + +func (cw *ansiColorWriter) Write(p []byte) (int, error) { + r, nw, first, last := 0, 0, 0, 0 + if cw.mode != discardNonColorEscSeq { + cw.state = outsideCsiCode + cw.resetBuffer() + } + + var err error + for i, ch := range p { + switch cw.state { + case outsideCsiCode: + if ch == firstCsiChar { + cw.paramStartBuf.WriteByte(ch) + cw.state = firstCsiCode + } + case firstCsiCode: + switch ch { + case firstCsiChar: + cw.paramStartBuf.WriteByte(ch) + break + case secondeCsiChar: + cw.paramStartBuf.WriteByte(ch) + cw.state = secondCsiCode + last = i - 1 + default: + cw.resetBuffer() + cw.state = outsideCsiCode + } + case secondCsiCode: + if isParameterChar(ch) { + 
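+			// digits and ';' are CSI parameter bytes; keep buffering until the final command byte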
cw.paramBuf.WriteByte(ch) + } else { + nw, err = cw.w.Write(p[first:last]) + r += nw + if err != nil { + return r, err + } + first = i + 1 + result := parseEscapeSequence(ch, cw.paramBuf.Bytes()) + if result == noConsole || (cw.mode == outputNonColorEscSeq && result == unknown) { + cw.paramBuf.WriteByte(ch) + nw, err := cw.flushBuffer() + if err != nil { + return r, err + } + r += nw + } else { + n, _ := cw.resetBuffer() + // Add one more to the size of the buffer for the last ch + r += n + 1 + } + + cw.state = outsideCsiCode + } + default: + cw.state = outsideCsiCode + } + } + + if cw.mode != discardNonColorEscSeq || cw.state == outsideCsiCode { + nw, err = cw.w.Write(p[first:]) + r += nw + } + + return r, err +} diff --git a/vendor/github.com/cucumber/godog/colors/colors.go b/vendor/github.com/cucumber/godog/colors/colors.go new file mode 100644 index 000000000..be7722e95 --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/colors.go @@ -0,0 +1,68 @@ +package colors + +import ( + "fmt" + "strings" +) + +const ansiEscape = "\x1b" + +// a color code type +type color int + +// some ansi colors +const ( + black color = iota + 30 + red + green + yellow + blue // unused + magenta // unused + cyan + white +) + +func colorize(s interface{}, c color) string { + return fmt.Sprintf("%s[%dm%v%s[0m", ansiEscape, c, s, ansiEscape) +} + +// ColorFunc is a helper type to create colorized strings. +type ColorFunc func(interface{}) string + +// Bold will accept a ColorFunc and return a new ColorFunc +// that will make the string bold. +func Bold(fn ColorFunc) ColorFunc { + return ColorFunc(func(input interface{}) string { + return strings.Replace(fn(input), ansiEscape+"[", ansiEscape+"[1;", 1) + }) +} + +// Green will accept an interface and return a colorized green string. +func Green(s interface{}) string { + return colorize(s, green) +} + +// Red will accept an interface and return a colorized red string. +func Red(s interface{}) string { + return colorize(s, red) +} + +// Cyan will accept an interface and return a colorized cyan string. +func Cyan(s interface{}) string { + return colorize(s, cyan) +} + +// Black will accept an interface and return a colorized black string. +func Black(s interface{}) string { + return colorize(s, black) +} + +// Yellow will accept an interface and return a colorized yellow string. +func Yellow(s interface{}) string { + return colorize(s, yellow) +} + +// White will accept an interface and return a colorized white string. +func White(s interface{}) string { + return colorize(s, white) +} diff --git a/vendor/github.com/cucumber/godog/colors/no_colors.go b/vendor/github.com/cucumber/godog/colors/no_colors.go new file mode 100644 index 000000000..2eeb80243 --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/no_colors.go @@ -0,0 +1,59 @@ +package colors + +import ( + "bytes" + "fmt" + "io" +) + +type noColors struct { + out io.Writer + lastbuf bytes.Buffer +} + +// Uncolored will accept and io.Writer and return a +// new io.Writer that won't include colors. 
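+// It does this by stripping ANSI escape sequences from the stream as it copies.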
+func Uncolored(w io.Writer) io.Writer { + return &noColors{out: w} +} + +func (w *noColors) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/vendor/github.com/cucumber/godog/colors/writer.go b/vendor/github.com/cucumber/godog/colors/writer.go new file mode 100644 index 000000000..469c7a5ed --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/writer.go @@ -0,0 +1,41 @@ +// Copyright 2014 shiena Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package colors + +import "io" + +type outputMode int + +// DiscardNonColorEscSeq supports the divided color escape sequence. +// But non-color escape sequence is not output. +// Please use the OutputNonColorEscSeq If you want to output a non-color +// escape sequences such as ncurses. However, it does not support the divided +// color escape sequence. +const ( + _ outputMode = iota + discardNonColorEscSeq + outputNonColorEscSeq // unused +) + +// Colored creates and initializes a new ansiColorWriter +// using io.Writer w as its initial contents. +// In the console of Windows, which change the foreground and background +// colors of the text by the escape sequence. +// In the console of other systems, which writes to w all text. +func Colored(w io.Writer) io.Writer { + return createModeAnsiColorWriter(w, discardNonColorEscSeq) +} + +// NewModeAnsiColorWriter create and initializes a new ansiColorWriter +// by specifying the outputMode. +func createModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer { + if _, ok := w.(*ansiColorWriter); !ok { + return &ansiColorWriter{ + w: w, + mode: mode, + } + } + return w +} diff --git a/vendor/github.com/cucumber/godog/flags.go b/vendor/github.com/cucumber/godog/flags.go new file mode 100644 index 000000000..45efbfec7 --- /dev/null +++ b/vendor/github.com/cucumber/godog/flags.go @@ -0,0 +1,255 @@ +package godog + +import ( + "flag" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/internal/utils" +) + +// repeats a space n times +var s = utils.S + +var descFeaturesArgument = "Optional feature(s) to run. Can be:\n" + + s(4) + "- dir " + colors.Yellow("(features/)") + "\n" + + s(4) + "- feature " + colors.Yellow("(*.feature)") + "\n" + + s(4) + "- scenario at specific line " + colors.Yellow("(*.feature:10)") + "\n" + + "If no feature paths are listed, suite tries " + colors.Yellow("features") + " path by default.\n" + + "Multiple comma-separated values can be provided.\n" + +var descConcurrencyOption = "Run the test suite with concurrency level:\n" + + s(4) + "- " + colors.Yellow(`= 1`) + ": supports all types of formats.\n" + + s(4) + "- " + colors.Yellow(`>= 2`) + ": only supports " + colors.Yellow("progress") + ". 
Note, that\n" + + s(4) + "your context needs to support parallel execution." + +var descTagsOption = "Filter scenarios by tags. Expression can be:\n" + + s(4) + "- " + colors.Yellow(`"@wip"`) + ": run all scenarios with wip tag\n" + + s(4) + "- " + colors.Yellow(`"~@wip"`) + ": exclude all scenarios with wip tag\n" + + s(4) + "- " + colors.Yellow(`"@wip && ~@new"`) + ": run wip scenarios, but exclude new\n" + + s(4) + "- " + colors.Yellow(`"@wip,@undone"`) + ": run wip or undone scenarios" + +var descRandomOption = "Randomly shuffle the scenario execution order.\n" + + "Specify SEED to reproduce the shuffling from a previous run.\n" + + s(4) + `e.g. ` + colors.Yellow(`--random`) + " or " + colors.Yellow(`--random=5738`) + +// FlagSet allows to manage flags by external suite runner +// builds flag.FlagSet with godog flags binded +// +// Deprecated: +func FlagSet(opt *Options) *flag.FlagSet { + set := flag.NewFlagSet("godog", flag.ExitOnError) + BindFlags("", set, opt) + set.Usage = usage(set, opt.Output) + return set +} + +// BindFlags binds godog flags to given flag set prefixed +// by given prefix, without overriding usage +func BindFlags(prefix string, set *flag.FlagSet, opt *Options) { + set.Usage = usage(set, set.Output()) + + descFormatOption := "How to format tests output. Built-in formats:\n" + + type fm struct { + name string + desc string + } + var fms []fm + for name, desc := range AvailableFormatters() { + fms = append(fms, fm{ + name: name, + desc: desc, + }) + } + sort.Slice(fms, func(i, j int) bool { + return fms[i].name < fms[j].name + }) + + for _, fm := range fms { + descFormatOption += s(4) + "- " + colors.Yellow(fm.name) + ": " + fm.desc + "\n" + } + + descFormatOption = strings.TrimSpace(descFormatOption) + + // override flag defaults if any corresponding properties were supplied on the incoming `opt` + defFormatOption := "pretty" + if opt.Format != "" { + defFormatOption = opt.Format + } + + defTagsOption := "" + if opt.Tags != "" { + defTagsOption = opt.Tags + } + + defConcurrencyOption := 1 + if opt.Concurrency != 0 { + defConcurrencyOption = opt.Concurrency + } + + defShowStepDefinitions := false + if opt.ShowStepDefinitions { + defShowStepDefinitions = opt.ShowStepDefinitions + } + + defStopOnFailure := false + if opt.StopOnFailure { + defStopOnFailure = opt.StopOnFailure + } + + defStrict := false + if opt.Strict { + defStrict = opt.Strict + } + + defNoColors := false + if opt.NoColors { + defNoColors = opt.NoColors + } + + set.StringVar(&opt.Format, prefix+"format", defFormatOption, descFormatOption) + set.StringVar(&opt.Format, prefix+"f", defFormatOption, descFormatOption) + set.StringVar(&opt.Tags, prefix+"tags", defTagsOption, descTagsOption) + set.StringVar(&opt.Tags, prefix+"t", defTagsOption, descTagsOption) + set.IntVar(&opt.Concurrency, prefix+"concurrency", defConcurrencyOption, descConcurrencyOption) + set.IntVar(&opt.Concurrency, prefix+"c", defConcurrencyOption, descConcurrencyOption) + set.BoolVar(&opt.ShowStepDefinitions, prefix+"definitions", defShowStepDefinitions, "Print all available step definitions.") + set.BoolVar(&opt.ShowStepDefinitions, prefix+"d", defShowStepDefinitions, "Print all available step definitions.") + set.BoolVar(&opt.StopOnFailure, prefix+"stop-on-failure", defStopOnFailure, "Stop processing on first failed scenario.") + set.BoolVar(&opt.Strict, prefix+"strict", defStrict, "Fail suite when there are pending or undefined or ambiguous steps.") + set.BoolVar(&opt.NoColors, prefix+"no-colors", defNoColors, "Disable ansi 
colors.") + set.Var(&randomSeed{&opt.Randomize}, prefix+"random", descRandomOption) + set.BoolVar(&opt.ShowHelp, "godog.help", false, "Show usage help.") + set.Func(prefix+"paths", descFeaturesArgument, func(paths string) error { + if paths != "" { + opt.Paths = strings.Split(paths, ",") + } + + return nil + }) +} + +type flagged struct { + short, long, descr, dflt string +} + +func (f *flagged) name() string { + var name string + switch { + case len(f.short) > 0 && len(f.long) > 0: + name = fmt.Sprintf("-%s, --%s", f.short, f.long) + case len(f.long) > 0: + name = fmt.Sprintf("--%s", f.long) + case len(f.short) > 0: + name = fmt.Sprintf("-%s", f.short) + } + + if f.long == "random" { + // `random` is special in that we will later assign it randomly + // if the user specifies `--random` without specifying one, + // so mask the "default" value here to avoid UI confusion about + // what the value will end up being. + name += "[=SEED]" + } else if f.dflt != "true" && f.dflt != "false" { + name += "=" + f.dflt + } + return name +} + +func usage(set *flag.FlagSet, w io.Writer) func() { + return func() { + var list []*flagged + var longest int + set.VisitAll(func(f *flag.Flag) { + var fl *flagged + for _, flg := range list { + if flg.descr == f.Usage { + fl = flg + break + } + } + if nil == fl { + fl = &flagged{ + dflt: f.DefValue, + descr: f.Usage, + } + list = append(list, fl) + } + if len(f.Name) > 2 { + fl.long = f.Name + } else { + fl.short = f.Name + } + }) + + for _, f := range list { + if len(f.name()) > longest { + longest = len(f.name()) + } + } + + // prints an option or argument with a description, or only description + opt := func(name, desc string) string { + var ret []string + lines := strings.Split(desc, "\n") + ret = append(ret, s(2)+colors.Green(name)+s(longest+2-len(name))+lines[0]) + if len(lines) > 1 { + for _, ln := range lines[1:] { + ret = append(ret, s(2)+s(longest+2)+ln) + } + } + return strings.Join(ret, "\n") + } + + // --- GENERAL --- + fmt.Fprintln(w, colors.Yellow("Usage:")) + fmt.Fprintf(w, s(2)+"go test [options]\n\n") + + // --- OPTIONS --- + fmt.Fprintln(w, colors.Yellow("Options:")) + for _, f := range list { + fmt.Fprintln(w, opt(f.name(), f.descr)) + } + fmt.Fprintln(w, "") + } +} + +// randomSeed implements `flag.Value`, see https://golang.org/pkg/flag/#Value +type randomSeed struct { + ref *int64 +} + +func (rs *randomSeed) Set(s string) error { + if s == "true" { + *rs.ref = makeRandomSeed() + return nil + } + + if s == "false" { + *rs.ref = 0 + return nil + } + + i, err := strconv.ParseInt(s, 10, 64) + *rs.ref = i + return err +} + +func (rs *randomSeed) String() string { + if rs.ref == nil { + return "0" + } + return strconv.FormatInt(*rs.ref, 10) +} + +// If a Value has an IsBoolFlag() bool method returning true, the command-line +// parser makes -name equivalent to -name=true rather than using the next +// command-line argument. +func (rs *randomSeed) IsBoolFlag() bool { + return *rs.ref == 0 +} diff --git a/vendor/github.com/cucumber/godog/flags_v0110.go b/vendor/github.com/cucumber/godog/flags_v0110.go new file mode 100644 index 000000000..eddf0279d --- /dev/null +++ b/vendor/github.com/cucumber/godog/flags_v0110.go @@ -0,0 +1,33 @@ +package godog + +import ( + "errors" + "flag" + "math/rand" + "time" + + "github.com/spf13/pflag" + + "github.com/cucumber/godog/internal/flags" +) + +// Choose randomly assigns a convenient pseudo-random seed value. +// The resulting seed will be between `1-99999` for later ease of specification. 
+func makeRandomSeed() int64 { + return rand.New(rand.NewSource(time.Now().UTC().UnixNano())).Int63n(99998) + 1 +} + +func flagSet(opt *Options) *pflag.FlagSet { + set := pflag.NewFlagSet("godog", pflag.ExitOnError) + flags.BindRunCmdFlags("", set, opt) + pflag.ErrHelp = errors.New("godog: help requested") + return set +} + +// BindCommandLineFlags binds godog flags to given flag set prefixed +// by given prefix, without overriding usage +func BindCommandLineFlags(prefix string, opts *Options) { + flagSet := pflag.CommandLine + flags.BindRunCmdFlags(prefix, flagSet, opts) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) +} diff --git a/vendor/github.com/cucumber/godog/fmt.go b/vendor/github.com/cucumber/godog/fmt.go new file mode 100644 index 000000000..f30f9f895 --- /dev/null +++ b/vendor/github.com/cucumber/godog/fmt.go @@ -0,0 +1,124 @@ +package godog + +import ( + "fmt" + "io" + "strings" + "unicode/utf8" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + internal_fmt "github.com/cucumber/godog/internal/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/storage" +) + +// FindFmt searches available formatters registered +// and returns FormaterFunc matched by given +// format name or nil otherwise +func FindFmt(name string) FormatterFunc { + return formatters.FindFmt(name) +} + +// Format registers a feature suite output +// formatter by given name, description and +// FormatterFunc constructor function, to initialize +// formatter with the output recorder. +func Format(name, description string, f FormatterFunc) { + formatters.Format(name, description, f) +} + +// AvailableFormatters gives a map of all +// formatters registered with their name as key +// and description as value +func AvailableFormatters() map[string]string { + return formatters.AvailableFormatters() +} + +// Formatter is an interface for feature runner +// output summary presentation. +// +// New formatters may be created to represent +// suite results in different ways. These new +// formatters needs to be registered with a +// godog.Format function call +type Formatter = formatters.Formatter + +type storageFormatter interface { + SetStorage(*storage.Storage) +} + +// FormatterFunc builds a formatter with given +// suite name and io.Writer to record output +type FormatterFunc = formatters.FormatterFunc + +func printStepDefinitions(steps []*models.StepDefinition, w io.Writer) { + var longest int + for _, def := range steps { + n := utf8.RuneCountInString(def.Expr.String()) + if longest < n { + longest = n + } + } + + for _, def := range steps { + n := utf8.RuneCountInString(def.Expr.String()) + location := internal_fmt.DefinitionID(def) + spaces := strings.Repeat(" ", longest-n) + fmt.Fprintln(w, + colors.Yellow(def.Expr.String())+spaces, + colors.Bold(colors.Black)("# "+location)) + } + + if len(steps) == 0 { + fmt.Fprintln(w, "there were no contexts registered, could not find any step definition..") + } +} + +// NewBaseFmt creates a new base formatter. +func NewBaseFmt(suite string, out io.Writer) *BaseFmt { + return internal_fmt.NewBase(suite, out) +} + +// NewProgressFmt creates a new progress formatter. +func NewProgressFmt(suite string, out io.Writer) *ProgressFmt { + return internal_fmt.NewProgress(suite, out) +} + +// NewPrettyFmt creates a new pretty formatter. +func NewPrettyFmt(suite string, out io.Writer) *PrettyFmt { + return &PrettyFmt{Base: NewBaseFmt(suite, out)} +} + +// NewEventsFmt creates a new event streaming formatter. 
+func NewEventsFmt(suite string, out io.Writer) *EventsFmt { + return &EventsFmt{Base: NewBaseFmt(suite, out)} +} + +// NewCukeFmt creates a new Cucumber JSON formatter. +func NewCukeFmt(suite string, out io.Writer) *CukeFmt { + return &CukeFmt{Base: NewBaseFmt(suite, out)} +} + +// NewJUnitFmt creates a new JUnit formatter. +func NewJUnitFmt(suite string, out io.Writer) *JUnitFmt { + return &JUnitFmt{Base: NewBaseFmt(suite, out)} +} + +// BaseFmt exports Base formatter. +type BaseFmt = internal_fmt.Base + +// ProgressFmt exports Progress formatter. +type ProgressFmt = internal_fmt.Progress + +// PrettyFmt exports Pretty formatter. +type PrettyFmt = internal_fmt.Pretty + +// EventsFmt exports Events formatter. +type EventsFmt = internal_fmt.Events + +// CukeFmt exports Cucumber JSON formatter. +type CukeFmt = internal_fmt.Cuke + +// JUnitFmt exports JUnit formatter. +type JUnitFmt = internal_fmt.JUnit diff --git a/vendor/github.com/cucumber/godog/formatters/fmt.go b/vendor/github.com/cucumber/godog/formatters/fmt.go new file mode 100644 index 000000000..973cf11b8 --- /dev/null +++ b/vendor/github.com/cucumber/godog/formatters/fmt.go @@ -0,0 +1,108 @@ +package formatters + +import ( + "io" + "regexp" + + messages "github.com/cucumber/messages/go/v21" +) + +type registeredFormatter struct { + name string + description string + fmt FormatterFunc +} + +var registeredFormatters []*registeredFormatter + +// FindFmt searches available formatters registered +// and returns FormaterFunc matched by given +// format name or nil otherwise +func FindFmt(name string) FormatterFunc { + for _, el := range registeredFormatters { + if el.name == name { + return el.fmt + } + } + + return nil +} + +// Format registers a feature suite output +// formatter by given name, description and +// FormatterFunc constructor function, to initialize +// formatter with the output recorder. +func Format(name, description string, f FormatterFunc) { + registeredFormatters = append(registeredFormatters, ®isteredFormatter{ + name: name, + fmt: f, + description: description, + }) +} + +// AvailableFormatters gives a map of all +// formatters registered with their name as key +// and description as value +func AvailableFormatters() map[string]string { + fmts := make(map[string]string, len(registeredFormatters)) + + for _, f := range registeredFormatters { + fmts[f.name] = f.description + } + + return fmts +} + +// Formatter is an interface for feature runner +// output summary presentation. +// +// New formatters may be created to represent +// suite results in different ways. These new +// formatters needs to be registered with a +// godog.Format function call +type Formatter interface { + TestRunStarted() + Feature(*messages.GherkinDocument, string, []byte) + Pickle(*messages.Pickle) + Defined(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Failed(*messages.Pickle, *messages.PickleStep, *StepDefinition, error) + Passed(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Skipped(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Undefined(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Pending(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Ambiguous(*messages.Pickle, *messages.PickleStep, *StepDefinition, error) + Summary() +} + +// FlushFormatter is a `Formatter` but can be flushed. 
+type FlushFormatter interface { + Formatter + Flush() +} + +// FormatterFunc builds a formatter with given +// suite name and io.Writer to record output +type FormatterFunc func(string, io.Writer) Formatter + +// StepDefinition is a registered step definition +// contains a StepHandler and regexp which +// is used to match a step. Args which +// were matched by last executed step +// +// This structure is passed to the formatter +// when step is matched and is either failed +// or successful +type StepDefinition struct { + Expr *regexp.Regexp + Handler interface{} + Keyword Keyword +} + +type Keyword int64 + +const ( + Given Keyword = iota + When + Then + None +) diff --git a/vendor/github.com/cucumber/godog/godog.go b/vendor/github.com/cucumber/godog/godog.go new file mode 100644 index 000000000..dda501471 --- /dev/null +++ b/vendor/github.com/cucumber/godog/godog.go @@ -0,0 +1,43 @@ +/* +Package godog is the official Cucumber BDD framework for Golang, it merges specification +and test documentation into one cohesive whole. + +Godog does not intervene with the standard "go test" command and it's behavior. +You can leverage both frameworks to functionally test your application while +maintaining all test related source code in *_test.go files. + +Godog acts similar compared to go test command. It uses go +compiler and linker tool in order to produce test executable. Godog +contexts needs to be exported same as Test functions for go test. + +For example, imagine you're about to create the famous UNIX ls command. +Before you begin, you describe how the feature should work, see the example below.. + +Example: + + Feature: ls + In order to see the directory structure + As a UNIX user + I need to be able to list the current directory's contents + + Scenario: + Given I am in a directory "test" + And I have a file named "foo" + And I have a file named "bar" + When I run ls + Then I should get output: + """ + bar + foo + """ + +Now, wouldn't it be cool if something could read this sentence and use it to actually +run a test against the ls command? Hey, that's exactly what this package does! +As you'll see, Godog is easy to learn, quick to use, and will put the fun back into tests. + +Godog was inspired by Behat and Cucumber the above description is taken from it's documentation. 
+*/ +package godog + +// Version of package - based on Semantic Versioning 2.0.0 http://semver.org/ +var Version = "v0.0.0-dev" diff --git a/vendor/github.com/cucumber/godog/internal/builder/ast.go b/vendor/github.com/cucumber/godog/internal/builder/ast.go new file mode 100644 index 000000000..c4f82407c --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/builder/ast.go @@ -0,0 +1,31 @@ +package builder + +import "go/ast" + +func astContexts(f *ast.File, selectName string) []string { + var contexts []string + for _, d := range f.Decls { + switch fun := d.(type) { + case *ast.FuncDecl: + for _, param := range fun.Type.Params.List { + switch expr := param.Type.(type) { + case *ast.StarExpr: + switch x := expr.X.(type) { + case *ast.Ident: + if x.Name == selectName { + contexts = append(contexts, fun.Name.Name) + } + case *ast.SelectorExpr: + switch t := x.X.(type) { + case *ast.Ident: + if t.Name == "godog" && x.Sel.Name == selectName { + contexts = append(contexts, fun.Name.Name) + } + } + } + } + } + } + } + return contexts +} diff --git a/vendor/github.com/cucumber/godog/internal/builder/builder.go b/vendor/github.com/cucumber/godog/internal/builder/builder.go new file mode 100644 index 000000000..4cd4928f5 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/builder/builder.go @@ -0,0 +1,490 @@ +package builder + +import ( + "bytes" + "encoding/json" + "fmt" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "text/template" + "time" + "unicode" +) + +var ( + tooldir = findToolDir() + compiler = filepath.Join(tooldir, "compile") + linker = filepath.Join(tooldir, "link") + gopaths = filepath.SplitList(build.Default.GOPATH) + godogImportPath = "github.com/cucumber/godog" + + // godep + runnerTemplate = template.Must(template.New("testmain").Parse(`package main + +import ( + "github.com/cucumber/godog" + {{if or .TestSuiteContexts .ScenarioContexts}}_test "{{.ImportPath}}"{{end}} + {{if or .XTestSuiteContexts .XScenarioContexts}}_xtest "{{.ImportPath}}_test"{{end}} + {{if or .XTestSuiteContexts .XScenarioContexts}}"testing/internal/testdeps"{{end}} + "os" +) + +{{if or .XTestSuiteContexts .XScenarioContexts}} +func init() { + testdeps.ImportPath = "{{.ImportPath}}" +} +{{end}} + +func main() { + status := godog.TestSuite{ + Name: "{{ .Name }}", + TestSuiteInitializer: func (ctx *godog.TestSuiteContext) { + os.Setenv("GODOG_TESTED_PACKAGE", "{{.ImportPath}}") + {{range .TestSuiteContexts}} + _test.{{ . }}(ctx) + {{end}} + {{range .XTestSuiteContexts}} + _xtest.{{ . }}(ctx) + {{end}} + }, + ScenarioInitializer: func (ctx *godog.ScenarioContext) { + {{range .ScenarioContexts}} + _test.{{ . }}(ctx) + {{end}} + {{range .XScenarioContexts}} + _xtest.{{ . }}(ctx) + {{end}} + }, + }.Run() + + os.Exit(status) +}`)) + + // temp file for import + tempFileTemplate = template.Must(template.New("temp").Parse(`package {{.Name}} + +import "github.com/cucumber/godog" + +var _ = godog.Version +`)) +) + +// Build creates a test package like go test command at given target path. +// If there are no go files in tested directory, then +// it simply builds a godog executable to scan features. +// +// If there are go test files, it first builds a test +// package with standard go test command. +// +// Finally it generates godog suite executable which +// registers exported godog contexts from the test files +// of tested package. 
+// +// Returns the path to generated executable +func Build(bin string) error { + abs, err := filepath.Abs(".") + if err != nil { + return err + } + + // we allow package to be nil, if godog is run only when + // there is a feature file in empty directory + pkg := importPackage(abs) + src, err := buildTestMain(pkg) + if err != nil { + return err + } + + // may need to produce temp file for godog dependency + srcTemp, err := buildTempFile(pkg) + if err != nil { + return err + } + + if srcTemp != nil { + // @TODO: in case of modules we cannot build it our selves, we need to have this hacky option + pathTemp := filepath.Join(abs, "godog_dependency_file_test.go") + err = ioutil.WriteFile(pathTemp, srcTemp, 0644) + if err != nil { + return err + } + defer os.Remove(pathTemp) + } + + workdir := "" + testdir := workdir + + // build and compile the tested package. + // generated test executable will be removed + // since we do not need it for godog suite. + // we also print back the temp WORK directory + // go has built. We will reuse it for our suite workdir. + temp := fmt.Sprintf(filepath.Join("%s", "temp-%d.test"), os.TempDir(), time.Now().UnixNano()) + if os.Getenv("GO111MODULE") != "off" { + modTidyOutput, err := exec.Command("go", "mod", "tidy").CombinedOutput() + if err != nil { + return fmt.Errorf("failed to tidy modules in tested package: %s, reason: %v, output: %s", abs, err, string(modTidyOutput)) + } + } + testOutput, err := exec.Command("go", "test", "-c", "-work", "-o", temp).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to compile tested package: %s, reason: %v, output: %s", abs, err, string(testOutput)) + } + defer os.Remove(temp) + + // extract go-build temporary directory as our workdir + linesOut := strings.Split(strings.TrimSpace(string(testOutput)), "\n") + // it may have some compilation warnings, in the output, but these are not + // considered to be errors, since command exit status is 0 + for _, ln := range linesOut { + if !strings.HasPrefix(ln, "WORK=") { + continue + } + workdir = strings.Replace(ln, "WORK=", "", 1) + break + } + + if strings.Contains(string(testOutput), "[no test files]") { + return fmt.Errorf("incorrect project structure: no test files found") + } + + // may not locate it in output + if workdir == testdir { + return fmt.Errorf("expected WORK dir path to be present in output: %s", string(testOutput)) + } + + // check whether workdir exists + stats, err := os.Stat(workdir) + if os.IsNotExist(err) { + return fmt.Errorf("expected WORK dir: %s to be available", workdir) + } + + if !stats.IsDir() { + return fmt.Errorf("expected WORK dir: %s to be directory", workdir) + } + testdir = filepath.Join(workdir, "b001") + defer os.RemoveAll(workdir) + + // replace _testmain.go file with our own + testmain := filepath.Join(testdir, "_testmain.go") + err = ioutil.WriteFile(testmain, src, 0644) + if err != nil { + return err + } + + // godog package may be vendored and may need importmap + vendored := maybeVendoredGodog() + + // compile godog testmain package archive + // we do not depend on CGO so a lot of checks are not necessary + linkerCfg := filepath.Join(testdir, "importcfg.link") + compilerCfg := linkerCfg + + if vendored != nil { + data, err := ioutil.ReadFile(linkerCfg) + if err != nil { + return err + } + + data = append(data, []byte(fmt.Sprintf("importmap %s=%s\n", godogImportPath, vendored.ImportPath))...) 
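+		// write the augmented import config to a separate compiler-only file,
+		// leaving the linker's importcfg.link untouched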
+ compilerCfg = filepath.Join(testdir, "importcfg") + + err = ioutil.WriteFile(compilerCfg, data, 0644) + if err != nil { + return err + } + } + + testMainPkgOut := filepath.Join(testdir, "main.a") + args := []string{ + "-o", testMainPkgOut, + "-importcfg", compilerCfg, + "-p", "main", + "-complete", + } + + if err := filterImportCfg(compilerCfg); err != nil { + return err + } + + args = append(args, "-pack", testmain) + cmd := exec.Command(compiler, args...) + cmd.Env = os.Environ() + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to compile testmain package: %v - output: %s", err, string(out)) + } + + // link test suite executable + args = []string{ + "-o", bin, + "-importcfg", linkerCfg, + "-buildmode=exe", + } + args = append(args, testMainPkgOut) + cmd = exec.Command(linker, args...) + cmd.Env = os.Environ() + + out, err = cmd.CombinedOutput() + if err != nil { + msg := `failed to link test executable: + reason: %s + command: %s` + return fmt.Errorf(msg, string(out), linker+" '"+strings.Join(args, "' '")+"'") + } + + return nil +} + +// filterImportCfg strips unsupported lines from imports configuration. +func filterImportCfg(path string) error { + orig, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read %s: %w", path, err) + } + + res := "" + for _, l := range strings.Split(string(orig), "\n") { + if !strings.HasPrefix(l, "modinfo") { + res += l + "\n" + } + } + err = ioutil.WriteFile(path, []byte(res), 0600) + if err != nil { + return fmt.Errorf("failed to write %s: %w", path, err) + } + + return nil +} + +func maybeVendoredGodog() *build.Package { + dir, err := filepath.Abs(".") + if err != nil { + return nil + } + + for _, gopath := range gopaths { + gopath = filepath.Join(gopath, "src") + for strings.HasPrefix(dir, gopath) && dir != gopath { + pkg, err := build.ImportDir(filepath.Join(dir, "vendor", godogImportPath), 0) + if err != nil { + dir = filepath.Dir(dir) + continue + } + return pkg + } + } + return nil +} + +func normaliseLocalImportPath(dir string) string { + return path.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir))) +} +func importPackage(dir string) *build.Package { + pkg, _ := build.ImportDir(dir, 0) + + // normalize import path for local import packages + // taken from go source code + // see: https://github.com/golang/go/blob/go1.7rc5/src/cmd/go/pkg.go#L279 + if pkg != nil && pkg.ImportPath == "." { + pkg.ImportPath = normaliseLocalImportPath(dir) + } + + return pkg +} + +// from go src +func makeImportValid(r rune) rune { + // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport. + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return '_' + } + return r +} + +// build temporary file content if godog +// package is not present in currently tested package +func buildTempFile(pkg *build.Package) ([]byte, error) { + shouldBuild := true + var name string + if pkg != nil { + name = pkg.Name + all := pkg.Imports + all = append(all, pkg.TestImports...) + all = append(all, pkg.XTestImports...) 
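+		// when godog is already imported anywhere in the package (sources,
+		// tests or external tests), no temporary dependency file is needed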
+ for _, imp := range all { + if imp == godogImportPath { + shouldBuild = false + break + } + } + + // maybe we are testing the godog package on it's own + if name == "godog" { + if parseImport(pkg.ImportPath, pkg.Root) == godogImportPath { + shouldBuild = false + } + } + } + + if name == "" { + name = "main" + } + + if !shouldBuild { + return nil, nil + } + + data := struct{ Name string }{name} + var buf bytes.Buffer + if err := tempFileTemplate.Execute(&buf, data); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// buildTestMain if given package is valid +// it scans test files for contexts +// and produces a testmain source code. +func buildTestMain(pkg *build.Package) ([]byte, error) { + var ( + ctxs, xctxs contexts + err error + name = "main" + importPath string + ) + + if nil != pkg { + if ctxs, err = processPackageTestFiles(pkg.TestGoFiles); err != nil { + return nil, err + } + + if xctxs, err = processPackageTestFiles(pkg.XTestGoFiles); err != nil { + return nil, err + } + + importPath = parseImport(pkg.ImportPath, pkg.Root) + name = pkg.Name + } else { + name = "main" + } + + data := struct { + Name string + ImportPath string + TestSuiteContexts []string + ScenarioContexts []string + XTestSuiteContexts []string + XScenarioContexts []string + }{ + Name: name, + ImportPath: importPath, + TestSuiteContexts: ctxs.testSuiteCtxs, + ScenarioContexts: ctxs.scenarioCtxs, + XTestSuiteContexts: xctxs.testSuiteCtxs, + XScenarioContexts: xctxs.scenarioCtxs, + } + + var buf bytes.Buffer + if err = runnerTemplate.Execute(&buf, data); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// parseImport parses the import path to deal with go module. +func parseImport(rawPath, rootPath string) string { + // with go > 1.11 and go module enabled out of the GOPATH, + // the import path begins with an underscore and the GOPATH is unknown on build. + if rootPath != "" { + // go < 1.11 or it's a module inside the GOPATH + return rawPath + } + // for module support, query the module import path + cmd := exec.Command("go", "list", "-m", "-json") + out, err := cmd.StdoutPipe() + if err != nil { + // Unable to read stdout + return rawPath + } + if cmd.Start() != nil { + // Does not using modules + return rawPath + } + var mod struct { + Dir string `json:"Dir"` + Path string `json:"Path"` + } + if json.NewDecoder(out).Decode(&mod) != nil { + // Unexpected result + return rawPath + } + if cmd.Wait() != nil { + return rawPath + } + // Concatenates the module path with the current sub-folders if needed + return mod.Path + filepath.ToSlash(strings.TrimPrefix(rawPath, normaliseLocalImportPath(mod.Dir))) +} + +type contexts struct { + deprecatedFeatureCtxs []string + testSuiteCtxs []string + scenarioCtxs []string +} + +func (ctxs contexts) validate() error { + var allCtxs []string + allCtxs = append(allCtxs, ctxs.deprecatedFeatureCtxs...) + allCtxs = append(allCtxs, ctxs.testSuiteCtxs...) + allCtxs = append(allCtxs, ctxs.scenarioCtxs...) + + var failed []string + for _, ctx := range allCtxs { + runes := []rune(ctx) + if unicode.IsLower(runes[0]) { + expected := append([]rune{unicode.ToUpper(runes[0])}, runes[1:]...) 
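+			// report the unexported name alongside its exported form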
+			failed = append(failed, fmt.Sprintf("%s - should be: %s", ctx, string(expected)))
+		}
+	}
+
+	if len(failed) > 0 {
+		return fmt.Errorf("godog contexts must be exported:\n\t%s", strings.Join(failed, "\n\t"))
+	}
+
+	return nil
+}
+
+// processPackageTestFiles runs through the AST of each test
+// file pack and looks for godog suite contexts to register
+// on run
+func processPackageTestFiles(packs ...[]string) (ctxs contexts, _ error) {
+	fset := token.NewFileSet()
+	for _, pack := range packs {
+		for _, testFile := range pack {
+			node, err := parser.ParseFile(fset, testFile, nil, 0)
+			if err != nil {
+				return ctxs, err
+			}
+
+			ctxs.testSuiteCtxs = append(ctxs.testSuiteCtxs, astContexts(node, "TestSuiteContext")...)
+			ctxs.scenarioCtxs = append(ctxs.scenarioCtxs, astContexts(node, "ScenarioContext")...)
+		}
+	}
+
+	return ctxs, ctxs.validate()
+}
+
+func findToolDir() string {
+	if out, err := exec.Command("go", "env", "GOTOOLDIR").Output(); err == nil {
+		return filepath.Clean(strings.TrimSpace(string(out)))
+	}
+	return filepath.Clean(build.ToolDir)
+}
diff --git a/vendor/github.com/cucumber/godog/internal/flags/flags.go b/vendor/github.com/cucumber/godog/internal/flags/flags.go
new file mode 100644
index 000000000..1bd67e591
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/flags/flags.go
@@ -0,0 +1,49 @@
+package flags
+
+import (
+	"github.com/spf13/pflag"
+)
+
+// BindRunCmdFlags is an internal func to bind run subcommand flags.
+func BindRunCmdFlags(prefix string, flagSet *pflag.FlagSet, opts *Options) {
+	if opts.Concurrency == 0 {
+		opts.Concurrency = 1
+	}
+
+	if opts.Format == "" {
+		opts.Format = "pretty"
+	}
+
+	flagSet.BoolVar(&opts.NoColors, prefix+"no-colors", opts.NoColors, "disable ansi colors")
+	flagSet.IntVarP(&opts.Concurrency, prefix+"concurrency", "c", opts.Concurrency, "run the test suite with concurrency")
+	flagSet.StringVarP(&opts.Tags, prefix+"tags", "t", opts.Tags, `filter scenarios by tags, expression can be:
+	"@wip"           run all scenarios with wip tag
+	"~@wip"          exclude all scenarios with wip tag
+	"@wip && ~@new"  run wip scenarios, but exclude new
+	"@wip,@undone"   run wip or undone scenarios`)
+	flagSet.StringVarP(&opts.Format, prefix+"format", "f", opts.Format, `will write a report according to the selected formatter
+
+usage:
+	-f <formatter>
+	will use the formatter and write the report on stdout
+	-f <formatter>:<file_path>
+	will use the formatter and write the report to the file path
+
+built-in formatters are:
+	progress	prints a character per step
+	cucumber	produces a Cucumber JSON report
+	events	produces JSON event stream, based on spec: 0.1.0
+	junit	produces JUnit compatible XML report
+	pretty	prints every feature with runtime statuses
+	`)
+
+	flagSet.BoolVarP(&opts.ShowStepDefinitions, prefix+"definitions", "d", opts.ShowStepDefinitions, "print all available step definitions")
+	flagSet.BoolVar(&opts.StopOnFailure, prefix+"stop-on-failure", opts.StopOnFailure, "stop processing on first failed scenario")
+	flagSet.BoolVar(&opts.Strict, prefix+"strict", opts.Strict, "fail suite when there are pending or undefined or ambiguous steps")
+
+	flagSet.Int64Var(&opts.Randomize, prefix+"random", opts.Randomize, `randomly shuffle the scenario execution order
+	--random
+specify SEED to reproduce the shuffling from a previous run
+	--random=5738`)
+	flagSet.Lookup(prefix + "random").NoOptDefVal = "-1"
+}
diff --git a/vendor/github.com/cucumber/godog/internal/flags/options.go b/vendor/github.com/cucumber/godog/internal/flags/options.go
new file mode 100644
index 000000000..40acea652
--- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/flags/options.go @@ -0,0 +1,88 @@ +package flags + +import ( + "context" + "io" + "io/fs" + "testing" +) + +// Options are suite run options +// flags are mapped to these options. +// +// It can also be used together with godog.RunWithOptions +// to run test suite from go source directly +// +// See the flags for more details +type Options struct { + // Print step definitions found and exit + ShowStepDefinitions bool + + // Randomize, if not `0`, will be used to run scenarios in a random order. + // + // Randomizing scenario order is especially helpful for detecting + // situations where you have state leaking between scenarios, which can + // cause flickering or fragile tests. + // + // The default value of `0` means "do not randomize". + // + // The magic value of `-1` means "pick a random seed for me", and godog will + // assign a seed on it's own during the `RunWithOptions` phase, similar to if + // you specified `--random` on the command line. + // + // Any other value will be used as the random seed for shuffling. Re-using the + // same seed will allow you to reproduce the shuffle order of a previous run + // to isolate an error condition. + Randomize int64 + + // Stops on the first failure + StopOnFailure bool + + // Fail suite when there are pending or undefined or ambiguous steps + Strict bool + + // Forces ansi color stripping + NoColors bool + + // Various filters for scenarios parsed + // from feature files + Tags string + + // Dialect to be used to parse feature files. If not set, default to "en". + Dialect string + + // The formatter name + Format string + + // Concurrency rate, not all formatters accepts this + Concurrency int + + // All feature file paths + Paths []string + + // Where it should print formatter output + Output io.Writer + + // DefaultContext is used as initial context instead of context.Background(). + DefaultContext context.Context + + // TestingT runs scenarios as subtests. + TestingT *testing.T + + // FeatureContents allows passing in each feature manually + // where the contents of each feature is stored as a byte slice + // in a map entry + FeatureContents []Feature + + // FS allows passing in an `fs.FS` to read features from, such as an `embed.FS` + // or os.DirFS(string). + FS fs.FS + + // ShowHelp enables suite to show CLI flags usage help and exit. 
+ ShowHelp bool +} + +type Feature struct { + Name string + Contents []byte +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt.go new file mode 100644 index 000000000..5530c0c24 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt.go @@ -0,0 +1,104 @@ +package formatters + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/utils" +) + +var ( + red = colors.Red + redb = colors.Bold(colors.Red) + green = colors.Green + blackb = colors.Bold(colors.Black) + yellow = colors.Yellow + cyan = colors.Cyan + cyanb = colors.Bold(colors.Cyan) + whiteb = colors.Bold(colors.White) +) + +// repeats a space n times +var s = utils.S + +var ( + passed = models.Passed + failed = models.Failed + skipped = models.Skipped + undefined = models.Undefined + pending = models.Pending + ambiguous = models.Ambiguous +) + +type sortFeaturesByName []*models.Feature + +func (s sortFeaturesByName) Len() int { return len(s) } +func (s sortFeaturesByName) Less(i, j int) bool { return s[i].Feature.Name < s[j].Feature.Name } +func (s sortFeaturesByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type sortPicklesByID []*messages.Pickle + +func (s sortPicklesByID) Len() int { return len(s) } +func (s sortPicklesByID) Less(i, j int) bool { + iID := mustConvertStringToInt(s[i].Id) + jID := mustConvertStringToInt(s[j].Id) + return iID < jID +} +func (s sortPicklesByID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type sortPickleStepResultsByPickleStepID []models.PickleStepResult + +func (s sortPickleStepResultsByPickleStepID) Len() int { return len(s) } +func (s sortPickleStepResultsByPickleStepID) Less(i, j int) bool { + iID := mustConvertStringToInt(s[i].PickleStepID) + jID := mustConvertStringToInt(s[j].PickleStepID) + return iID < jID +} +func (s sortPickleStepResultsByPickleStepID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func mustConvertStringToInt(s string) int { + i, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + + return i +} + +// DefinitionID ... 
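+// renders a step definition reference such as "steps_test.go:12 -> iEat"
+// (an illustrative value): the base file name and line of the definition,
+// plus the handler name resolved through runtime.FuncForPC.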
+func DefinitionID(sd *models.StepDefinition) string { + ptr := sd.HandlerValue.Pointer() + f := runtime.FuncForPC(ptr) + dir := filepath.Dir(sd.File) + fn := strings.Replace(f.Name(), dir, "", -1) + var parts []string + for _, gr := range matchFuncDefRef.FindAllStringSubmatch(fn, -1) { + parts = append(parts, strings.Trim(gr[1], "_.")) + } + if len(parts) > 0 { + // case when suite is a structure with methods + fn = strings.Join(parts, ".") + } else { + // case when steps are just plain funcs + fn = strings.Trim(fn, "_.") + } + + if pkg := os.Getenv("GODOG_TESTED_PACKAGE"); len(pkg) > 0 { + fn = strings.Replace(fn, pkg, "", 1) + fn = strings.TrimLeft(fn, ".") + fn = strings.Replace(fn, "..", ".", -1) + } + + return fmt.Sprintf("%s:%d -> %s", filepath.Base(sd.File), sd.Line, fn) +} + +var matchFuncDefRef = regexp.MustCompile(`\(([^\)]+)\)`) diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go new file mode 100644 index 000000000..607a1c065 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go @@ -0,0 +1,272 @@ +package formatters + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/storage" + "github.com/cucumber/godog/internal/utils" +) + +// BaseFormatterFunc implements the FormatterFunc for the base formatter. +func BaseFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return NewBase(suite, out) +} + +// NewBase creates a new base formatter. +func NewBase(suite string, out io.Writer) *Base { + return &Base{ + suiteName: suite, + indent: 2, + out: out, + Lock: new(sync.Mutex), + } +} + +// Base is a base formatter. +type Base struct { + suiteName string + out io.Writer + indent int + + Storage *storage.Storage + Lock *sync.Mutex +} + +// SetStorage assigns gherkin data storage. +func (f *Base) SetStorage(st *storage.Storage) { + f.Lock.Lock() + defer f.Lock.Unlock() + + f.Storage = st +} + +// TestRunStarted is triggered on test start. +func (f *Base) TestRunStarted() {} + +// Feature receives gherkin document. +func (f *Base) Feature(*messages.GherkinDocument, string, []byte) {} + +// Pickle receives scenario. +func (f *Base) Pickle(*messages.Pickle) {} + +// Defined receives step definition. +func (f *Base) Defined(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Passed captures passed step. +func (f *Base) Passed(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {} + +// Skipped captures skipped step. +func (f *Base) Skipped(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Undefined captures undefined step. +func (f *Base) Undefined(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Failed captures failed step. +func (f *Base) Failed(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition, error) { +} + +// Pending captures pending step. +func (f *Base) Pending(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Ambiguous captures ambiguous step. +func (f *Base) Ambiguous(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition, error) { +} + +// Summary renders summary information. 
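+// Scenario and step totals are aggregated from storage and printed with a
+// per-status breakdown, followed by the elapsed time, the randomization
+// seed (when GODOG_SEED is set) and snippets for any undefined steps.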
+func (f *Base) Summary() { + var totalSc, passedSc, undefinedSc int + var totalSt, passedSt, failedSt, skippedSt, pendingSt, undefinedSt, ambiguousSt int + + pickleResults := f.Storage.MustGetPickleResults() + for _, pr := range pickleResults { + var prStatus models.StepResultStatus + totalSc++ + + pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pr.PickleID) + + if len(pickleStepResults) == 0 { + prStatus = undefined + } + + for _, sr := range pickleStepResults { + totalSt++ + + switch sr.Status { + case passed: + passedSt++ + case failed: + prStatus = failed + failedSt++ + case ambiguous: + prStatus = ambiguous + ambiguousSt++ + case skipped: + skippedSt++ + case undefined: + prStatus = undefined + undefinedSt++ + case pending: + prStatus = pending + pendingSt++ + } + } + + if prStatus == passed { + passedSc++ + } else if prStatus == undefined { + undefinedSc++ + } + } + + var steps, parts, scenarios []string + if passedSt > 0 { + steps = append(steps, green(fmt.Sprintf("%d passed", passedSt))) + } + if failedSt > 0 { + parts = append(parts, red(fmt.Sprintf("%d failed", failedSt))) + steps = append(steps, red(fmt.Sprintf("%d failed", failedSt))) + } + if pendingSt > 0 { + parts = append(parts, yellow(fmt.Sprintf("%d pending", pendingSt))) + steps = append(steps, yellow(fmt.Sprintf("%d pending", pendingSt))) + } + if ambiguousSt > 0 { + parts = append(parts, yellow(fmt.Sprintf("%d ambiguous", ambiguousSt))) + steps = append(steps, yellow(fmt.Sprintf("%d ambiguous", ambiguousSt))) + } + if undefinedSt > 0 { + parts = append(parts, yellow(fmt.Sprintf("%d undefined", undefinedSc))) + steps = append(steps, yellow(fmt.Sprintf("%d undefined", undefinedSt))) + } else if undefinedSc > 0 { + // there may be some scenarios without steps + parts = append(parts, yellow(fmt.Sprintf("%d undefined", undefinedSc))) + } + if skippedSt > 0 { + steps = append(steps, cyan(fmt.Sprintf("%d skipped", skippedSt))) + } + if passedSc > 0 { + scenarios = append(scenarios, green(fmt.Sprintf("%d passed", passedSc))) + } + scenarios = append(scenarios, parts...) + + testRunStartedAt := f.Storage.MustGetTestRunStarted().StartedAt + elapsed := utils.TimeNowFunc().Sub(testRunStartedAt) + + fmt.Fprintln(f.out, "") + + if totalSc == 0 { + fmt.Fprintln(f.out, "No scenarios") + } else { + fmt.Fprintf(f.out, "%d scenarios (%s)\n", totalSc, strings.Join(scenarios, ", ")) + } + + if totalSt == 0 { + fmt.Fprintln(f.out, "No steps") + } else { + fmt.Fprintf(f.out, "%d steps (%s)\n", totalSt, strings.Join(steps, ", ")) + } + + elapsedString := elapsed.String() + if elapsed.Nanoseconds() == 0 { + // go 1.5 and 1.6 prints 0 instead of 0s, if duration is zero. + elapsedString = "0s" + } + fmt.Fprintln(f.out, elapsedString) + + // prints used randomization seed + seed, err := strconv.ParseInt(os.Getenv("GODOG_SEED"), 10, 64) + if err == nil && seed != 0 { + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, "Randomized with seed:", colors.Yellow(seed)) + } + + if text := f.Snippets(); text != "" { + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, yellow("You can implement step definitions for undefined steps with these snippets:")) + fmt.Fprintln(f.out, yellow(text)) + } +} + +// Snippets returns code suggestions for undefined steps. 
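+// Each undefined step text is converted into a regular expression and a
+// camel-cased method name; snippets that share an expression are emitted
+// only once.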
+func (f *Base) Snippets() string { + undefinedStepResults := f.Storage.MustGetPickleStepResultsByStatus(undefined) + if len(undefinedStepResults) == 0 { + return "" + } + + var index int + var snips []undefinedSnippet + // build snippets + for _, u := range undefinedStepResults { + pickleStep := f.Storage.MustGetPickleStep(u.PickleStepID) + + steps := []string{pickleStep.Text} + arg := pickleStep.Argument + if u.Def != nil { + steps = u.Def.Undefined + arg = nil + } + for _, step := range steps { + expr := snippetExprCleanup.ReplaceAllString(step, "\\$1") + expr = snippetNumbers.ReplaceAllString(expr, "(\\d+)") + expr = snippetExprQuoted.ReplaceAllString(expr, "$1\"([^\"]*)\"$2") + expr = "^" + strings.TrimSpace(expr) + "$" + + name := snippetNumbers.ReplaceAllString(step, " ") + name = snippetExprQuoted.ReplaceAllString(name, " ") + name = strings.TrimSpace(snippetMethodName.ReplaceAllString(name, "")) + var words []string + for i, w := range strings.Split(name, " ") { + switch { + case i != 0: + w = strings.Title(w) + case len(w) > 0: + w = string(unicode.ToLower(rune(w[0]))) + w[1:] + } + words = append(words, w) + } + name = strings.Join(words, "") + if len(name) == 0 { + index++ + name = fmt.Sprintf("StepDefinitioninition%d", index) + } + + var found bool + for _, snip := range snips { + if snip.Expr == expr { + found = true + break + } + } + if !found { + snips = append(snips, undefinedSnippet{Method: name, Expr: expr, argument: arg}) + } + } + } + + sort.Sort(snippetSortByMethod(snips)) + + var buf bytes.Buffer + if err := undefinedSnippetsTpl.Execute(&buf, snips); err != nil { + panic(err) + } + // there may be trailing spaces + return strings.Replace(buf.String(), " \n", "\n", -1) +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go new file mode 100644 index 000000000..31380c975 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go @@ -0,0 +1,326 @@ +package formatters + +/* + The specification for the formatting originated from https://www.relishapp.com/cucumber/cucumber/docs/formatters/json-output-formatter. + I found that documentation was misleading or out dated. To validate formatting I create a ruby cucumber test harness and ran the + same feature files through godog and the ruby cucumber. + + The docstrings in the cucumber.feature represent the cucumber output for those same feature definitions. + + I did note that comments in ruby could be at just about any level in particular Feature, Scenario and Step. In godog I + could only find comments under the Feature data structure. +*/ + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "sort" + "strings" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" + messages "github.com/cucumber/messages/go/v21" +) + +func init() { + formatters.Format("cucumber", "Produces cucumber JSON format output.", CucumberFormatterFunc) +} + +// CucumberFormatterFunc implements the FormatterFunc for the cucumber formatter +func CucumberFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return &Cuke{Base: NewBase(suite, out)} +} + +// Cuke ... +type Cuke struct { + *Base +} + +// Summary renders test result as Cucumber JSON. 
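+// The report is assembled from storage once the run has finished and is
+// marshalled with indentation before being written to the output.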
+func (f *Cuke) Summary() { + features := f.Storage.MustGetFeatures() + + res := f.buildCukeFeatures(features) + + dat, err := json.MarshalIndent(res, "", " ") + if err != nil { + panic(err) + } + + fmt.Fprintf(f.out, "%s\n", string(dat)) +} + +func (f *Cuke) buildCukeFeatures(features []*models.Feature) (res []CukeFeatureJSON) { + sort.Sort(sortFeaturesByName(features)) + + res = make([]CukeFeatureJSON, len(features)) + + for idx, feat := range features { + cukeFeature := buildCukeFeature(feat) + + pickles := f.Storage.MustGetPickles(feat.Uri) + sort.Sort(sortPicklesByID(pickles)) + + cukeFeature.Elements = f.buildCukeElements(pickles) + + for jdx, elem := range cukeFeature.Elements { + elem.ID = cukeFeature.ID + ";" + makeCukeID(elem.Name) + elem.ID + elem.Tags = append(cukeFeature.Tags, elem.Tags...) + cukeFeature.Elements[jdx] = elem + } + + res[idx] = cukeFeature + } + + return res +} + +func (f *Cuke) buildCukeElements(pickles []*messages.Pickle) (res []cukeElement) { + res = make([]cukeElement, len(pickles)) + + for idx, pickle := range pickles { + pickleResult := f.Storage.MustGetPickleResult(pickle.Id) + pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pickle.Id) + + cukeElement := f.buildCukeElement(pickle) + + stepStartedAt := pickleResult.StartedAt + + cukeElement.Steps = make([]cukeStep, len(pickleStepResults)) + sort.Sort(sortPickleStepResultsByPickleStepID(pickleStepResults)) + + for jdx, stepResult := range pickleStepResults { + cukeStep := f.buildCukeStep(pickle, stepResult) + + stepResultFinishedAt := stepResult.FinishedAt + d := int(stepResultFinishedAt.Sub(stepStartedAt).Nanoseconds()) + stepStartedAt = stepResultFinishedAt + + cukeStep.Result.Duration = &d + if stepResult.Status == undefined || + stepResult.Status == pending || + stepResult.Status == skipped || + stepResult.Status == ambiguous { + cukeStep.Result.Duration = nil + } + + cukeElement.Steps[jdx] = cukeStep + } + + res[idx] = cukeElement + } + + return res +} + +type cukeComment struct { + Value string `json:"value"` + Line int `json:"line"` +} + +type cukeDocstring struct { + Value string `json:"value"` + ContentType string `json:"content_type"` + Line int `json:"line"` +} + +type cukeTag struct { + Name string `json:"name"` + Line int `json:"line"` +} + +type cukeResult struct { + Status string `json:"status"` + Error string `json:"error_message,omitempty"` + Duration *int `json:"duration,omitempty"` +} + +type cukeMatch struct { + Location string `json:"location"` +} + +type cukeEmbedding struct { + Name string `json:"name"` + MimeType string `json:"mime_type"` + Data string `json:"data"` +} + +type cukeStep struct { + Keyword string `json:"keyword"` + Name string `json:"name"` + Line int `json:"line"` + Docstring *cukeDocstring `json:"doc_string,omitempty"` + Match cukeMatch `json:"match"` + Result cukeResult `json:"result"` + DataTable []*cukeDataTableRow `json:"rows,omitempty"` + Embeddings []cukeEmbedding `json:"embeddings,omitempty"` +} + +type cukeDataTableRow struct { + Cells []string `json:"cells"` +} + +type cukeElement struct { + ID string `json:"id"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Line int `json:"line"` + Type string `json:"type"` + Tags []cukeTag `json:"tags,omitempty"` + Steps []cukeStep `json:"steps,omitempty"` +} + +// CukeFeatureJSON ... 
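+// is the serializable shape of a single feature in the Cucumber JSON
+// report; Elements holds the scenarios together with their steps.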
+type CukeFeatureJSON struct { + URI string `json:"uri"` + ID string `json:"id"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Line int `json:"line"` + Comments []cukeComment `json:"comments,omitempty"` + Tags []cukeTag `json:"tags,omitempty"` + Elements []cukeElement `json:"elements,omitempty"` +} + +func buildCukeFeature(feat *models.Feature) CukeFeatureJSON { + cukeFeature := CukeFeatureJSON{ + URI: feat.Uri, + ID: makeCukeID(feat.Feature.Name), + Keyword: feat.Feature.Keyword, + Name: feat.Feature.Name, + Description: feat.Feature.Description, + Line: int(feat.Feature.Location.Line), + Comments: make([]cukeComment, len(feat.Comments)), + Tags: make([]cukeTag, len(feat.Feature.Tags)), + } + + for idx, element := range feat.Feature.Tags { + cukeFeature.Tags[idx].Line = int(element.Location.Line) + cukeFeature.Tags[idx].Name = element.Name + } + + for idx, comment := range feat.Comments { + cukeFeature.Comments[idx].Value = strings.TrimSpace(comment.Text) + cukeFeature.Comments[idx].Line = int(comment.Location.Line) + } + + return cukeFeature +} + +func (f *Cuke) buildCukeElement(pickle *messages.Pickle) (cukeElement cukeElement) { + feature := f.Storage.MustGetFeature(pickle.Uri) + scenario := feature.FindScenario(pickle.AstNodeIds[0]) + + cukeElement.Name = pickle.Name + cukeElement.Line = int(scenario.Location.Line) + cukeElement.Description = scenario.Description + cukeElement.Keyword = scenario.Keyword + cukeElement.Type = "scenario" + + cukeElement.Tags = make([]cukeTag, len(scenario.Tags)) + for idx, element := range scenario.Tags { + cukeElement.Tags[idx].Line = int(element.Location.Line) + cukeElement.Tags[idx].Name = element.Name + } + + if len(pickle.AstNodeIds) == 1 { + return + } + + example, _ := feature.FindExample(pickle.AstNodeIds[1]) + + for _, tag := range example.Tags { + tag := cukeTag{Line: int(tag.Location.Line), Name: tag.Name} + cukeElement.Tags = append(cukeElement.Tags, tag) + } + + examples := scenario.Examples + if len(examples) > 0 { + rowID := pickle.AstNodeIds[1] + + for _, example := range examples { + for idx, row := range example.TableBody { + if rowID == row.Id { + cukeElement.ID += fmt.Sprintf(";%s;%d", makeCukeID(example.Name), idx+2) + cukeElement.Line = int(row.Location.Line) + } + } + } + } + + return cukeElement +} + +func (f *Cuke) buildCukeStep(pickle *messages.Pickle, stepResult models.PickleStepResult) (cukeStep cukeStep) { + feature := f.Storage.MustGetFeature(pickle.Uri) + pickleStep := f.Storage.MustGetPickleStep(stepResult.PickleStepID) + step := feature.FindStep(pickleStep.AstNodeIds[0]) + + line := step.Location.Line + + cukeStep.Name = pickleStep.Text + cukeStep.Line = int(line) + cukeStep.Keyword = step.Keyword + + arg := pickleStep.Argument + + if arg != nil { + if arg.DocString != nil && step.DocString != nil { + cukeStep.Docstring = &cukeDocstring{} + cukeStep.Docstring.ContentType = strings.TrimSpace(arg.DocString.MediaType) + if step.Location != nil { + cukeStep.Docstring.Line = int(step.DocString.Location.Line) + } + cukeStep.Docstring.Value = arg.DocString.Content + } + + if arg.DataTable != nil { + cukeStep.DataTable = make([]*cukeDataTableRow, len(arg.DataTable.Rows)) + for i, row := range arg.DataTable.Rows { + cells := make([]string, len(row.Cells)) + for j, cell := range row.Cells { + cells[j] = cell.Value + } + cukeStep.DataTable[i] = &cukeDataTableRow{Cells: cells} + } + } + } + + if stepResult.Def != nil { + cukeStep.Match.Location = 
strings.Split(DefinitionID(stepResult.Def), " ")[0] + } + + cukeStep.Result.Status = stepResult.Status.String() + if stepResult.Err != nil { + cukeStep.Result.Error = stepResult.Err.Error() + } + + if stepResult.Status == undefined || stepResult.Status == pending || stepResult.Status == ambiguous { + cukeStep.Match.Location = fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line) + } + + if stepResult.Attachments != nil { + attachments := []cukeEmbedding{} + + for _, a := range stepResult.Attachments { + attachments = append(attachments, cukeEmbedding{ + Name: a.Name, + Data: base64.StdEncoding.EncodeToString(a.Data), + MimeType: a.MimeType, + }) + } + + if len(attachments) > 0 { + cukeStep.Embeddings = attachments + } + } + return cukeStep +} + +func makeCukeID(name string) string { + return strings.Replace(strings.ToLower(name), " ", "-", -1) +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go new file mode 100644 index 000000000..c5ffcb50e --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go @@ -0,0 +1,346 @@ +package formatters + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/utils" + messages "github.com/cucumber/messages/go/v21" +) + +const nanoSec = 1000000 +const spec = "0.1.0" + +func init() { + formatters.Format("events", fmt.Sprintf("Produces JSON event stream, based on spec: %s.", spec), EventsFormatterFunc) +} + +// EventsFormatterFunc implements the FormatterFunc for the events formatter +func EventsFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return &Events{Base: NewBase(suite, out)} +} + +// Events - Events formatter +type Events struct { + *Base +} + +func (f *Events) event(ev interface{}) { + data, err := json.Marshal(ev) + if err != nil { + panic(fmt.Sprintf("failed to marshal stream event: %+v - %v", ev, err)) + } + fmt.Fprintln(f.out, string(data)) +} + +// Pickle receives scenario. +func (f *Events) Pickle(pickle *messages.Pickle) { + f.Base.Pickle(pickle) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + }{ + "TestCaseStarted", + f.scenarioLocation(pickle), + utils.TimeNowFunc().UnixNano() / nanoSec, + }) + + if len(pickle.Steps) == 0 { + // @TODO: is status undefined or passed? when there are no steps + // for this scenario + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + Status string `json:"status"` + }{ + "TestCaseFinished", + f.scenarioLocation(pickle), + utils.TimeNowFunc().UnixNano() / nanoSec, + "undefined", + }) + } +} + +// TestRunStarted is triggered on test start. +func (f *Events) TestRunStarted() { + f.Base.TestRunStarted() + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.event(&struct { + Event string `json:"event"` + Version string `json:"version"` + Timestamp int64 `json:"timestamp"` + Suite string `json:"suite"` + }{ + "TestRunStarted", + spec, + utils.TimeNowFunc().UnixNano() / nanoSec, + f.suiteName, + }) +} + +// Feature receives gherkin document. 
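+// A TestSource event carrying the feature location and its raw source is
+// pushed to the stream, e.g. (illustrative values only):
+//
+//	{"event":"TestSource","location":"features/eat.feature:1","source":"..."}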
+func (f *Events) Feature(ft *messages.GherkinDocument, p string, c []byte) { + f.Base.Feature(ft, p, c) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Source string `json:"source"` + }{ + "TestSource", + fmt.Sprintf("%s:%d", p, ft.Feature.Location.Line), + string(c), + }) +} + +// Summary pushes summary information to JSON stream. +func (f *Events) Summary() { + // @TODO: determine status + status := passed + + f.Storage.MustGetPickleStepResultsByStatus(failed) + + if len(f.Storage.MustGetPickleStepResultsByStatus(failed)) > 0 { + status = failed + } else if len(f.Storage.MustGetPickleStepResultsByStatus(passed)) == 0 { + if len(f.Storage.MustGetPickleStepResultsByStatus(undefined)) > len(f.Storage.MustGetPickleStepResultsByStatus(pending)) { + status = undefined + } else { + status = pending + } + } + + snips := f.Snippets() + if len(snips) > 0 { + snips = "You can implement step definitions for undefined steps with these snippets:\n" + snips + } + + f.event(&struct { + Event string `json:"event"` + Status string `json:"status"` + Timestamp int64 `json:"timestamp"` + Snippets string `json:"snippets"` + Memory string `json:"memory"` + }{ + "TestRunFinished", + status.String(), + utils.TimeNowFunc().UnixNano() / nanoSec, + snips, + "", // @TODO not sure that could be correctly implemented + }) +} + +func (f *Events) step(pickle *messages.Pickle, pickleStep *messages.PickleStep) { + feature := f.Storage.MustGetFeature(pickle.Uri) + pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStep.Id) + step := feature.FindStep(pickleStep.AstNodeIds[0]) + + var errMsg string + if pickleStepResult.Err != nil { + errMsg = pickleStepResult.Err.Error() + } + + if pickleStepResult.Attachments != nil { + for _, attachment := range pickleStepResult.Attachments { + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + ContentEncoding string `json:"contentEncoding"` + FileName string `json:"fileName"` + MimeType string `json:"mimeType"` + Body string `json:"body"` + }{ + "Attachment", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + utils.TimeNowFunc().UnixNano() / nanoSec, + messages.AttachmentContentEncoding_BASE64.String(), + attachment.Name, + attachment.MimeType, + string(attachment.Data), + }) + + } + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + Status string `json:"status"` + Summary string `json:"summary,omitempty"` + }{ + "TestStepFinished", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + utils.TimeNowFunc().UnixNano() / nanoSec, + pickleStepResult.Status.String(), + errMsg, + }) + + if isLastStep(pickle, pickleStep) { + var status string + + pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pickle.Id) + for _, stepResult := range pickleStepResults { + switch stepResult.Status { + case passed, failed, undefined, pending, ambiguous: + status = stepResult.Status.String() + } + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + Status string `json:"status"` + }{ + "TestCaseFinished", + f.scenarioLocation(pickle), + utils.TimeNowFunc().UnixNano() / nanoSec, + status, + }) + } +} + +// Defined receives step definition. 
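+// When a definition matched, a StepDefinitionFound event with the matched
+// argument offsets is emitted first, followed by TestStepStarted.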
+func (f *Events) Defined(pickle *messages.Pickle, pickleStep *messages.PickleStep, def *formatters.StepDefinition) { + f.Base.Defined(pickle, pickleStep, def) + + f.Lock.Lock() + defer f.Lock.Unlock() + + feature := f.Storage.MustGetFeature(pickle.Uri) + step := feature.FindStep(pickleStep.AstNodeIds[0]) + + if def != nil { + matchedDef := f.Storage.MustGetStepDefintionMatch(pickleStep.AstNodeIds[0]) + + m := def.Expr.FindStringSubmatchIndex(pickleStep.Text)[2:] + var args [][2]int + for i := 0; i < len(m)/2; i++ { + pair := m[i : i*2+2] + var idxs [2]int + idxs[0] = pair[0] + idxs[1] = pair[1] + args = append(args, idxs) + } + + if len(args) == 0 { + args = make([][2]int, 0) + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + DefID string `json:"definition_id"` + Args [][2]int `json:"arguments"` + }{ + "StepDefinitionFound", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + DefinitionID(matchedDef), + args, + }) + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + }{ + "TestStepStarted", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + utils.TimeNowFunc().UnixNano() / nanoSec, + }) +} + +// Passed captures passed step. +func (f *Events) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Passed(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Skipped captures skipped step. +func (f *Events) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Skipped(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Undefined captures undefined step. +func (f *Events) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Undefined(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Failed captures failed step. +func (f *Events) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Failed(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Pending captures pending step. +func (f *Events) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Pending(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Ambiguous captures ambiguous step. 
+func (f *Events) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Ambiguous(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +func (f *Events) scenarioLocation(pickle *messages.Pickle) string { + feature := f.Storage.MustGetFeature(pickle.Uri) + scenario := feature.FindScenario(pickle.AstNodeIds[0]) + + line := scenario.Location.Line + if len(pickle.AstNodeIds) == 2 { + _, row := feature.FindExample(pickle.AstNodeIds[1]) + line = row.Location.Line + } + + return fmt.Sprintf("%s:%d", pickle.Uri, line) +} + +func isLastStep(pickle *messages.Pickle, step *messages.PickleStep) bool { + return pickle.Steps[len(pickle.Steps)-1].Id == step.Id +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go new file mode 100644 index 000000000..129b06210 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go @@ -0,0 +1,108 @@ +package formatters + +import ( + "sync" + + "github.com/cucumber/godog/formatters" + messages "github.com/cucumber/messages/go/v21" +) + +// WrapOnFlush wrap a `formatters.Formatter` in a `formatters.FlushFormatter`, which only +// executes when `Flush` is called +func WrapOnFlush(fmt formatters.Formatter) formatters.FlushFormatter { + return &onFlushFormatter{ + fmt: fmt, + fns: make([]func(), 0), + mu: &sync.Mutex{}, + } +} + +type onFlushFormatter struct { + fmt formatters.Formatter + fns []func() + mu *sync.Mutex +} + +func (o *onFlushFormatter) Pickle(pickle *messages.Pickle) { + o.fns = append(o.fns, func() { + o.fmt.Pickle(pickle) + }) +} + +func (o *onFlushFormatter) Passed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Passed(pickle, step, definition) + }) +} + +// Ambiguous implements formatters.Formatter. +func (o *onFlushFormatter) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + o.fns = append(o.fns, func() { + o.fmt.Ambiguous(pickle, step, definition, err) + }) +} + +// Defined implements formatters.Formatter. +func (o *onFlushFormatter) Defined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Defined(pickle, step, definition) + }) +} + +// Failed implements formatters.Formatter. +func (o *onFlushFormatter) Failed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + o.fns = append(o.fns, func() { + o.fmt.Failed(pickle, step, definition, err) + }) +} + +// Feature implements formatters.Formatter. +func (o *onFlushFormatter) Feature(pickle *messages.GherkinDocument, p string, c []byte) { + o.fns = append(o.fns, func() { + o.fmt.Feature(pickle, p, c) + }) +} + +// Pending implements formatters.Formatter. +func (o *onFlushFormatter) Pending(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Pending(pickle, step, definition) + }) +} + +// Skipped implements formatters.Formatter. +func (o *onFlushFormatter) Skipped(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Skipped(pickle, step, definition) + }) +} + +// Summary implements formatters.Formatter. 
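+// As with the other wrapped callbacks, the call is queued and is only
+// forwarded to the underlying formatter when Flush is invoked.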
+func (o *onFlushFormatter) Summary() { + o.fns = append(o.fns, func() { + o.fmt.Summary() + }) +} + +// TestRunStarted implements formatters.Formatter. +func (o *onFlushFormatter) TestRunStarted() { + o.fns = append(o.fns, func() { + o.fmt.TestRunStarted() + }) +} + +// Undefined implements formatters.Formatter. +func (o *onFlushFormatter) Undefined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Undefined(pickle, step, definition) + }) +} + +// Flush the logs. +func (o *onFlushFormatter) Flush() { + o.mu.Lock() + defer o.mu.Unlock() + for _, fn := range o.fns { + fn() + } +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go new file mode 100644 index 000000000..85acabe2e --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go @@ -0,0 +1,246 @@ +package formatters + +import ( + "encoding/xml" + "fmt" + "io" + "os" + "sort" + "strconv" + "time" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/utils" +) + +func init() { + formatters.Format("junit", "Prints junit compatible xml to stdout", JUnitFormatterFunc) +} + +// JUnitFormatterFunc implements the FormatterFunc for the junit formatter +func JUnitFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return &JUnit{Base: NewBase(suite, out)} +} + +// JUnit renders test results in JUnit format. +type JUnit struct { + *Base +} + +// Summary renders summary information. +func (f *JUnit) Summary() { + suite := f.buildJUNITPackageSuite() + + _, err := io.WriteString(f.out, xml.Header) + if err != nil { + fmt.Fprintln(os.Stderr, "failed to write junit string:", err) + } + + enc := xml.NewEncoder(f.out) + enc.Indent("", s(2)) + if err = enc.Encode(suite); err != nil { + fmt.Fprintln(os.Stderr, "failed to write junit xml:", err) + } +} + +func junitTimeDuration(from, to time.Time) string { + return strconv.FormatFloat(to.Sub(from).Seconds(), 'f', -1, 64) +} + +// getPickleResult deals with the fact that if there's no result due to 'StopOnFirstFailure' being +// set, MustGetPickleResult panics. 
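+// The recover-based accessors below turn that panic into a nil result,
+// which the suite builder then reports as a skipped test case.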
+func (f *JUnit) getPickleResult(pickleID string) (res *models.PickleResult) { + defer func() { + if r := recover(); r != nil { + res = nil + } + }() + pr := f.Storage.MustGetPickleResult(pickleID) + res = &pr + return +} + +func (f *JUnit) getPickleStepResult(stepID string) (res *models.PickleStepResult) { + defer func() { + if r := recover(); r != nil { + res = nil + } + }() + psr := f.Storage.MustGetPickleStepResult(stepID) + res = &psr + return +} + +func (f *JUnit) getPickleStepResultsByPickleID(pickleID string) (res []models.PickleStepResult) { + defer func() { + if r := recover(); r != nil { + res = nil + } + }() + res = f.Storage.MustGetPickleStepResultsByPickleID(pickleID) + return +} + +func (f *JUnit) buildJUNITPackageSuite() JunitPackageSuite { + features := f.Storage.MustGetFeatures() + sort.Sort(sortFeaturesByName(features)) + + testRunStartedAt := f.Storage.MustGetTestRunStarted().StartedAt + + suite := JunitPackageSuite{ + Name: f.suiteName, + TestSuites: make([]*junitTestSuite, len(features)), + Time: junitTimeDuration(testRunStartedAt, utils.TimeNowFunc()), + } + + for idx, feature := range features { + pickles := f.Storage.MustGetPickles(feature.Uri) + sort.Sort(sortPicklesByID(pickles)) + + ts := junitTestSuite{ + Name: feature.Feature.Name, + TestCases: make([]*junitTestCase, len(pickles)), + } + + var testcaseNames = make(map[string]int) + for _, pickle := range pickles { + testcaseNames[pickle.Name] = testcaseNames[pickle.Name] + 1 + } + + firstPickleStartedAt := testRunStartedAt + lastPickleFinishedAt := testRunStartedAt + + var outlineNo = make(map[string]int) + for idx, pickle := range pickles { + tc := junitTestCase{} + tc.Name = pickle.Name + if testcaseNames[tc.Name] > 1 { + outlineNo[tc.Name] = outlineNo[tc.Name] + 1 + tc.Name += fmt.Sprintf(" #%d", outlineNo[tc.Name]) + } + + pickleResult := f.getPickleResult(pickle.Id) + if pickleResult == nil { + tc.Status = skipped.String() + } else { + if idx == 0 { + firstPickleStartedAt = pickleResult.StartedAt + } + lastPickleFinishedAt = pickleResult.StartedAt + } + + if len(pickle.Steps) > 0 { + lastStep := pickle.Steps[len(pickle.Steps)-1] + if lastPickleStepResult := f.getPickleStepResult(lastStep.Id); lastPickleStepResult != nil { + lastPickleFinishedAt = lastPickleStepResult.FinishedAt + } + } + + if pickleResult != nil { + tc.Time = junitTimeDuration(pickleResult.StartedAt, lastPickleFinishedAt) + } + + ts.Tests++ + suite.Tests++ + + pickleStepResults := f.getPickleStepResultsByPickleID(pickle.Id) + for _, stepResult := range pickleStepResults { + pickleStep := f.Storage.MustGetPickleStep(stepResult.PickleStepID) + + switch stepResult.Status { + case passed: + tc.Status = passed.String() + case failed: + tc.Status = failed.String() + tc.Failure = &junitFailure{ + Message: fmt.Sprintf("Step %s: %s", pickleStep.Text, stepResult.Err), + } + case ambiguous: + tc.Status = ambiguous.String() + tc.Error = append(tc.Error, &junitError{ + Type: "ambiguous", + Message: fmt.Sprintf("Step %s", pickleStep.Text), + }) + case skipped: + tc.Error = append(tc.Error, &junitError{ + Type: "skipped", + Message: fmt.Sprintf("Step %s", pickleStep.Text), + }) + case undefined: + tc.Status = undefined.String() + tc.Error = append(tc.Error, &junitError{ + Type: "undefined", + Message: fmt.Sprintf("Step %s", pickleStep.Text), + }) + case pending: + tc.Status = pending.String() + tc.Error = append(tc.Error, &junitError{ + Type: "pending", + Message: fmt.Sprintf("Step %s: TODO: write pending definition", pickleStep.Text), + }) + } + } + + 
switch tc.Status { + case failed.String(): + ts.Failures++ + suite.Failures++ + case undefined.String(), pending.String(): + ts.Errors++ + suite.Errors++ + } + + ts.TestCases[idx] = &tc + } + + ts.Time = junitTimeDuration(firstPickleStartedAt, lastPickleFinishedAt) + + suite.TestSuites[idx] = &ts + } + + return suite +} + +type junitFailure struct { + Message string `xml:"message,attr"` + Type string `xml:"type,attr,omitempty"` +} + +type junitError struct { + XMLName xml.Name `xml:"error,omitempty"` + Message string `xml:"message,attr"` + Type string `xml:"type,attr"` +} + +type junitTestCase struct { + XMLName xml.Name `xml:"testcase"` + Name string `xml:"name,attr"` + Status string `xml:"status,attr"` + Time string `xml:"time,attr"` + Failure *junitFailure `xml:"failure,omitempty"` + Error []*junitError +} + +type junitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Skipped int `xml:"skipped,attr"` + Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` + Time string `xml:"time,attr"` + TestCases []*junitTestCase +} + +// JunitPackageSuite ... +type JunitPackageSuite struct { + XMLName xml.Name `xml:"testsuites"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Skipped int `xml:"skipped,attr"` + Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` + Time string `xml:"time,attr"` + TestSuites []*junitTestSuite +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go new file mode 100644 index 000000000..001c99809 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go @@ -0,0 +1,139 @@ +package formatters + +import ( + "io" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/storage" + messages "github.com/cucumber/messages/go/v21" +) + +// MultiFormatter passes test progress to multiple formatters. +type MultiFormatter struct { + formatters []formatter + repeater repeater +} + +type formatter struct { + fmt formatters.FormatterFunc + out io.Writer +} + +type repeater []formatters.Formatter + +type storageFormatter interface { + SetStorage(s *storage.Storage) +} + +// SetStorage passes storage to all added formatters. +func (r repeater) SetStorage(s *storage.Storage) { + for _, f := range r { + if ss, ok := f.(storageFormatter); ok { + ss.SetStorage(s) + } + } +} + +// TestRunStarted triggers TestRunStarted for all added formatters. +func (r repeater) TestRunStarted() { + for _, f := range r { + f.TestRunStarted() + } +} + +// Feature triggers Feature for all added formatters. +func (r repeater) Feature(document *messages.GherkinDocument, s string, bytes []byte) { + for _, f := range r { + f.Feature(document, s, bytes) + } +} + +// Pickle triggers Pickle for all added formatters. +func (r repeater) Pickle(pickle *messages.Pickle) { + for _, f := range r { + f.Pickle(pickle) + } +} + +// Defined triggers Defined for all added formatters. +func (r repeater) Defined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Defined(pickle, step, definition) + } +} + +// Failed triggers Failed for all added formatters. 
+func (r repeater) Failed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + for _, f := range r { + f.Failed(pickle, step, definition, err) + } +} + +// Passed triggers Passed for all added formatters. +func (r repeater) Passed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Passed(pickle, step, definition) + } +} + +// Skipped triggers Skipped for all added formatters. +func (r repeater) Skipped(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Skipped(pickle, step, definition) + } +} + +// Undefined triggers Undefined for all added formatters. +func (r repeater) Undefined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Undefined(pickle, step, definition) + } +} + +// Pending triggers Pending for all added formatters. +func (r repeater) Pending(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Pending(pickle, step, definition) + } +} + +// Ambiguous triggers Ambiguous for all added formatters. +func (r repeater) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + for _, f := range r { + f.Ambiguous(pickle, step, definition, err) + } +} + +// Summary triggers Summary for all added formatters. +func (r repeater) Summary() { + for _, f := range r { + f.Summary() + } +} + +// Add adds formatter with output writer. +func (m *MultiFormatter) Add(name string, out io.Writer) { + f := formatters.FindFmt(name) + if f == nil { + panic("formatter not found: " + name) + } + + m.formatters = append(m.formatters, formatter{ + fmt: f, + out: out, + }) +} + +// FormatterFunc implements the FormatterFunc for the multi formatter. +func (m *MultiFormatter) FormatterFunc(suite string, out io.Writer) formatters.Formatter { + for _, f := range m.formatters { + out := out + if f.out != nil { + out = f.out + } + + m.repeater = append(m.repeater, f.fmt(suite, out)) + } + + return m.repeater +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go new file mode 100644 index 000000000..76d733793 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go @@ -0,0 +1,586 @@ +package formatters + +import ( + "fmt" + "io" + "regexp" + "sort" + "strings" + "unicode/utf8" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" +) + +func init() { + formatters.Format("pretty", "Prints every feature with runtime statuses.", PrettyFormatterFunc) +} + +// PrettyFormatterFunc implements the FormatterFunc for the pretty formatter +func PrettyFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return &Pretty{Base: NewBase(suite, out)} +} + +var outlinePlaceholderRegexp = regexp.MustCompile("<[^>]+>") + +// Pretty is a formatter for readable output. +type Pretty struct { + *Base + firstFeature *bool +} + +// TestRunStarted is triggered on test start. +func (f *Pretty) TestRunStarted() { + f.Base.TestRunStarted() + + f.Lock.Lock() + defer f.Lock.Unlock() + + firstFeature := true + f.firstFeature = &firstFeature +} + +// Feature receives gherkin document. 
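+// The feature header (keyword, name and indented description) is printed
+// immediately, with a blank line separating consecutive features.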
+func (f *Pretty) Feature(gd *messages.GherkinDocument, p string, c []byte) { + f.Lock.Lock() + if !*f.firstFeature { + fmt.Fprintln(f.out, "") + } + + *f.firstFeature = false + f.Lock.Unlock() + + f.Base.Feature(gd, p, c) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printFeature(gd.Feature) +} + +// Pickle takes a gherkin node for formatting. +func (f *Pretty) Pickle(pickle *messages.Pickle) { + f.Base.Pickle(pickle) + + f.Lock.Lock() + defer f.Lock.Unlock() + + if len(pickle.Steps) == 0 { + f.printUndefinedPickle(pickle) + return + } +} + +// Passed captures passed step. +func (f *Pretty) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Passed(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printStep(pickle, step) +} + +// Skipped captures skipped step. +func (f *Pretty) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Skipped(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printStep(pickle, step) +} + +// Undefined captures undefined step. +func (f *Pretty) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Undefined(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printStep(pickle, step) +} + +// Failed captures failed step. +func (f *Pretty) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Failed(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printStep(pickle, step) +} + +// Failed captures failed step. +func (f *Pretty) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Ambiguous(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printStep(pickle, step) +} + +// Pending captures pending step. 
+func (f *Pretty) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Pending(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printStep(pickle, step) +} + +func (f *Pretty) printFeature(feature *messages.Feature) { + fmt.Fprintln(f.out, keywordAndName(feature.Keyword, feature.Name)) + if strings.TrimSpace(feature.Description) != "" { + for _, line := range strings.Split(feature.Description, "\n") { + fmt.Fprintln(f.out, s(f.indent)+strings.TrimSpace(line)) + } + } +} + +func keywordAndName(keyword, name string) string { + title := whiteb(keyword + ":") + if len(name) > 0 { + title += " " + name + } + return title +} + +func (f *Pretty) scenarioLengths(pickle *messages.Pickle) (scenarioHeaderLength int, maxLength int) { + feature := f.Storage.MustGetFeature(pickle.Uri) + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + astBackground := feature.FindBackground(pickle.AstNodeIds[0]) + + scenarioHeaderLength = f.lengthPickle(astScenario.Keyword, astScenario.Name) + maxLength = f.longestStep(astScenario.Steps, scenarioHeaderLength) + + if astBackground != nil { + maxLength = f.longestStep(astBackground.Steps, maxLength) + } + + return scenarioHeaderLength, maxLength +} + +func (f *Pretty) printScenarioHeader(pickle *messages.Pickle, astScenario *messages.Scenario, spaceFilling int) { + feature := f.Storage.MustGetFeature(pickle.Uri) + text := s(f.indent) + keywordAndName(astScenario.Keyword, astScenario.Name) + text += s(spaceFilling) + line(feature.Uri, astScenario.Location) + fmt.Fprintln(f.out, "\n"+text) +} + +func (f *Pretty) printUndefinedPickle(pickle *messages.Pickle) { + feature := f.Storage.MustGetFeature(pickle.Uri) + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + astBackground := feature.FindBackground(pickle.AstNodeIds[0]) + + scenarioHeaderLength, maxLength := f.scenarioLengths(pickle) + + if astBackground != nil { + fmt.Fprintln(f.out, "\n"+s(f.indent)+keywordAndName(astBackground.Keyword, astBackground.Name)) + for _, step := range astBackground.Steps { + text := s(f.indent*2) + cyan(strings.TrimSpace(step.Keyword)) + " " + cyan(step.Text) + fmt.Fprintln(f.out, text) + } + } + + // do not print scenario headers and examples multiple times + if len(astScenario.Examples) > 0 { + exampleTable, exampleRow := feature.FindExample(pickle.AstNodeIds[1]) + firstExampleRow := exampleTable.TableBody[0].Id == exampleRow.Id + firstExamplesTable := astScenario.Examples[0].Location.Line == exampleTable.Location.Line + + if !(firstExamplesTable && firstExampleRow) { + return + } + } + + f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength) + + for _, examples := range astScenario.Examples { + max := longestExampleRow(examples, cyan, cyan) + + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, s(f.indent*2)+keywordAndName(examples.Keyword, examples.Name)) + + f.printTableHeader(examples.TableHeader, max) + + for _, row := range examples.TableBody { + f.printTableRow(row, max, cyan) + } + } +} + +// Summary renders summary information. 
+func (f *Pretty) Summary() { + failedStepResults := f.Storage.MustGetPickleStepResultsByStatus(failed) + if len(failedStepResults) > 0 { + fmt.Fprintln(f.out, "\n--- "+red("Failed steps:")+"\n") + + sort.Sort(sortPickleStepResultsByPickleStepID(failedStepResults)) + + for _, fail := range failedStepResults { + pickle := f.Storage.MustGetPickle(fail.PickleID) + pickleStep := f.Storage.MustGetPickleStep(fail.PickleStepID) + feature := f.Storage.MustGetFeature(pickle.Uri) + + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + scenarioDesc := fmt.Sprintf("%s: %s", astScenario.Keyword, pickle.Name) + + astStep := feature.FindStep(pickleStep.AstNodeIds[0]) + stepDesc := strings.TrimSpace(astStep.Keyword) + " " + pickleStep.Text + + fmt.Fprintln(f.out, s(f.indent)+red(scenarioDesc)+line(feature.Uri, astScenario.Location)) + fmt.Fprintln(f.out, s(f.indent*2)+red(stepDesc)+line(feature.Uri, astStep.Location)) + fmt.Fprintln(f.out, s(f.indent*3)+red("Error: ")+redb(fmt.Sprintf("%+v", fail.Err))+"\n") + } + } + + f.Base.Summary() +} + +func (f *Pretty) printOutlineExample(pickle *messages.Pickle, step *messages.PickleStep, backgroundSteps int) { + var errorMsg string + var clr = green + + feature := f.Storage.MustGetFeature(pickle.Uri) + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + scenarioHeaderLength, maxLength := f.scenarioLengths(pickle) + + exampleTable, exampleRow := feature.FindExample(pickle.AstNodeIds[1]) + printExampleHeader := exampleTable.TableBody[0].Id == exampleRow.Id + firstExamplesTable := astScenario.Examples[0].Location.Line == exampleTable.Location.Line + + pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleIDUntilStep(pickle.Id, step.Id) + + firstExecutedScenarioStep := len(pickleStepResults) == backgroundSteps+1 + if firstExamplesTable && printExampleHeader && firstExecutedScenarioStep { + f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength) + } + + if len(exampleTable.TableBody) == 0 { + // do not print empty examples + return + } + + lastStep := len(pickleStepResults) == len(pickle.Steps) + if !lastStep { + // do not print examples unless all steps has finished + return + } + + for _, result := range pickleStepResults { + // determine example row status + switch { + case result.Status == failed: + errorMsg = result.Err.Error() + clr = result.Status.Color() + case result.Status == ambiguous: + errorMsg = result.Err.Error() + clr = result.Status.Color() + case result.Status == undefined || result.Status == pending: + clr = result.Status.Color() + case result.Status == skipped && clr == nil: + clr = cyan + } + + if firstExamplesTable && printExampleHeader { + // in first example, we need to print steps + + pickleStep := f.Storage.MustGetPickleStep(result.PickleStepID) + astStep := feature.FindStep(pickleStep.AstNodeIds[0]) + + var text = "" + if result.Def != nil { + if m := outlinePlaceholderRegexp.FindAllStringIndex(astStep.Text, -1); len(m) > 0 { + var pos int + for i := 0; i < len(m); i++ { + pair := m[i] + text += cyan(astStep.Text[pos:pair[0]]) + text += cyanb(astStep.Text[pair[0]:pair[1]]) + pos = pair[1] + } + text += cyan(astStep.Text[pos:len(astStep.Text)]) + } else { + text = cyan(astStep.Text) + } + + _, maxLength := f.scenarioLengths(pickle) + stepLength := f.lengthPickleStep(astStep.Keyword, astStep.Text) + + text += s(maxLength - stepLength) + text += " " + blackb("# "+DefinitionID(result.Def)) + } + + // print the step outline + fmt.Fprintln(f.out, s(f.indent*2)+cyan(strings.TrimSpace(astStep.Keyword))+" 
"+text) + + if pickleStep.Argument != nil { + if table := pickleStep.Argument.DataTable; table != nil { + f.printTable(table, cyan) + } + + if docString := astStep.DocString; docString != nil { + f.printDocString(docString) + } + } + } + } + + max := longestExampleRow(exampleTable, clr, cyan) + + // an example table header + if printExampleHeader { + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, s(f.indent*2)+keywordAndName(exampleTable.Keyword, exampleTable.Name)) + + f.printTableHeader(exampleTable.TableHeader, max) + } + + f.printTableRow(exampleRow, max, clr) + + if errorMsg != "" { + fmt.Fprintln(f.out, s(f.indent*4)+redb(errorMsg)) + } +} + +func (f *Pretty) printTableRow(row *messages.TableRow, max []int, clr colors.ColorFunc) { + cells := make([]string, len(row.Cells)) + + for i, cell := range row.Cells { + val := clr(cell.Value) + ln := utf8.RuneCountInString(val) + cells[i] = val + s(max[i]-ln) + } + + fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cells, " | ")+" |") +} + +func (f *Pretty) printTableHeader(row *messages.TableRow, max []int) { + f.printTableRow(row, max, cyan) +} + +func isFirstScenarioInRule(rule *messages.Rule, scenario *messages.Scenario) bool { + if rule == nil || scenario == nil { + return false + } + var firstScenario *messages.Scenario + for _, c := range rule.Children { + if c.Scenario != nil { + firstScenario = c.Scenario + break + } + } + return firstScenario != nil && firstScenario.Id == scenario.Id +} + +func isFirstPickleAndNoRule(feature *models.Feature, pickle *messages.Pickle, rule *messages.Rule) bool { + if rule != nil { + return false + } + return feature.Pickles[0].Id == pickle.Id +} + +func (f *Pretty) printStep(pickle *messages.Pickle, pickleStep *messages.PickleStep) { + feature := f.Storage.MustGetFeature(pickle.Uri) + astBackground := feature.FindBackground(pickle.AstNodeIds[0]) + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + astRule := feature.FindRule(pickle.AstNodeIds[0]) + astStep := feature.FindStep(pickleStep.AstNodeIds[0]) + + var astBackgroundStep bool + var firstExecutedBackgroundStep bool + var backgroundSteps int + + if astBackground != nil { + backgroundSteps = len(astBackground.Steps) + + for idx, step := range astBackground.Steps { + if step.Id == pickleStep.AstNodeIds[0] { + astBackgroundStep = true + firstExecutedBackgroundStep = idx == 0 + break + } + } + } + + firstPickle := isFirstPickleAndNoRule(feature, pickle, astRule) || isFirstScenarioInRule(astRule, astScenario) + + if astBackgroundStep && !firstPickle { + return + } + + if astBackgroundStep && firstExecutedBackgroundStep { + fmt.Fprintln(f.out, "\n"+s(f.indent)+keywordAndName(astBackground.Keyword, astBackground.Name)) + } + + if !astBackgroundStep && len(astScenario.Examples) > 0 { + f.printOutlineExample(pickle, pickleStep, backgroundSteps) + return + } + + scenarioHeaderLength, maxLength := f.scenarioLengths(pickle) + stepLength := f.lengthPickleStep(astStep.Keyword, pickleStep.Text) + + firstExecutedScenarioStep := astScenario.Steps[0].Id == pickleStep.AstNodeIds[0] + if !astBackgroundStep && firstExecutedScenarioStep { + f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength) + } + + pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStep.Id) + text := s(f.indent*2) + pickleStepResult.Status.Color()(strings.TrimSpace(astStep.Keyword)) + " " + pickleStepResult.Status.Color()(pickleStep.Text) + if pickleStepResult.Def != nil { + text += s(maxLength - stepLength + 1) + text += blackb("# " + 
DefinitionID(pickleStepResult.Def)) + } + fmt.Fprintln(f.out, text) + + if pickleStep.Argument != nil { + if table := pickleStep.Argument.DataTable; table != nil { + f.printTable(table, cyan) + } + + if docString := astStep.DocString; docString != nil { + f.printDocString(docString) + } + } + + if pickleStepResult.Err != nil { + fmt.Fprintln(f.out, s(f.indent*2)+redb(fmt.Sprintf("%+v", pickleStepResult.Err))) + } + + if pickleStepResult.Status == pending { + fmt.Fprintln(f.out, s(f.indent*3)+yellow("TODO: write pending definition")) + } +} + +func (f *Pretty) printDocString(docString *messages.DocString) { + var ct string + + if len(docString.MediaType) > 0 { + ct = " " + cyan(docString.MediaType) + } + + fmt.Fprintln(f.out, s(f.indent*3)+cyan(docString.Delimiter)+ct) + + for _, ln := range strings.Split(docString.Content, "\n") { + fmt.Fprintln(f.out, s(f.indent*3)+cyan(ln)) + } + + fmt.Fprintln(f.out, s(f.indent*3)+cyan(docString.Delimiter)) +} + +// print table with aligned table cells +// @TODO: need to make example header cells bold +func (f *Pretty) printTable(t *messages.PickleTable, c colors.ColorFunc) { + maxColLengths := maxColLengths(t, c) + var cols = make([]string, len(t.Rows[0].Cells)) + + for _, row := range t.Rows { + for i, cell := range row.Cells { + val := c(cell.Value) + colLength := utf8.RuneCountInString(val) + cols[i] = val + s(maxColLengths[i]-colLength) + } + + fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cols, " | ")+" |") + } +} + +// longest gives a list of longest columns of all rows in Table +func maxColLengths(t *messages.PickleTable, clrs ...colors.ColorFunc) []int { + if t == nil { + return []int{} + } + + longest := make([]int, len(t.Rows[0].Cells)) + for _, row := range t.Rows { + for i, cell := range row.Cells { + for _, c := range clrs { + ln := utf8.RuneCountInString(c(cell.Value)) + if longest[i] < ln { + longest[i] = ln + } + } + + ln := utf8.RuneCountInString(cell.Value) + if longest[i] < ln { + longest[i] = ln + } + } + } + + return longest +} + +func longestExampleRow(t *messages.Examples, clrs ...colors.ColorFunc) []int { + if t == nil { + return []int{} + } + + longest := make([]int, len(t.TableHeader.Cells)) + for i, cell := range t.TableHeader.Cells { + for _, c := range clrs { + ln := utf8.RuneCountInString(c(cell.Value)) + if longest[i] < ln { + longest[i] = ln + } + } + + ln := utf8.RuneCountInString(cell.Value) + if longest[i] < ln { + longest[i] = ln + } + } + + for _, row := range t.TableBody { + for i, cell := range row.Cells { + for _, c := range clrs { + ln := utf8.RuneCountInString(c(cell.Value)) + if longest[i] < ln { + longest[i] = ln + } + } + + ln := utf8.RuneCountInString(cell.Value) + if longest[i] < ln { + longest[i] = ln + } + } + } + + return longest +} + +func (f *Pretty) longestStep(steps []*messages.Step, pickleLength int) int { + max := pickleLength + + for _, step := range steps { + length := f.lengthPickleStep(step.Keyword, step.Text) + if length > max { + max = length + } + } + + return max +} + +// a line number representation in feature file +func line(path string, loc *messages.Location) string { + // Path can contain a line number already. + // This line number has to be trimmed to avoid duplication. 
+ path = strings.TrimSuffix(path, fmt.Sprintf(":%d", loc.Line)) + return " " + blackb(fmt.Sprintf("# %s:%d", path, loc.Line)) +} + +func (f *Pretty) lengthPickleStep(keyword, text string) int { + return f.indent*2 + utf8.RuneCountInString(strings.TrimSpace(keyword)+" "+text) +} + +func (f *Pretty) lengthPickle(keyword, name string) int { + return f.indent + utf8.RuneCountInString(strings.TrimSpace(keyword)+": "+name) +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go new file mode 100644 index 000000000..9722ef7a5 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go @@ -0,0 +1,172 @@ +package formatters + +import ( + "fmt" + "io" + "math" + "sort" + "strings" + + "github.com/cucumber/godog/formatters" + messages "github.com/cucumber/messages/go/v21" +) + +func init() { + formatters.Format("progress", "Prints a character per step.", ProgressFormatterFunc) +} + +// ProgressFormatterFunc implements the FormatterFunc for the progress formatter. +func ProgressFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return NewProgress(suite, out) +} + +// NewProgress creates a new progress formatter. +func NewProgress(suite string, out io.Writer) *Progress { + steps := 0 + return &Progress{ + Base: NewBase(suite, out), + StepsPerRow: 70, + Steps: &steps, + } +} + +// Progress is a minimalistic formatter. +type Progress struct { + *Base + StepsPerRow int + Steps *int +} + +// Summary renders summary information. +func (f *Progress) Summary() { + left := math.Mod(float64(*f.Steps), float64(f.StepsPerRow)) + if left != 0 { + if *f.Steps > f.StepsPerRow { + fmt.Fprintf(f.out, s(f.StepsPerRow-int(left))+fmt.Sprintf(" %d\n", *f.Steps)) + } else { + fmt.Fprintf(f.out, " %d\n", *f.Steps) + } + } + + var failedStepsOutput []string + + failedSteps := f.Storage.MustGetPickleStepResultsByStatus(failed) + sort.Sort(sortPickleStepResultsByPickleStepID(failedSteps)) + + for _, sr := range failedSteps { + if sr.Status == failed { + pickle := f.Storage.MustGetPickle(sr.PickleID) + pickleStep := f.Storage.MustGetPickleStep(sr.PickleStepID) + feature := f.Storage.MustGetFeature(pickle.Uri) + + sc := feature.FindScenario(pickle.AstNodeIds[0]) + scenarioDesc := fmt.Sprintf("%s: %s", sc.Keyword, pickle.Name) + scenarioLine := fmt.Sprintf("%s:%d", pickle.Uri, sc.Location.Line) + + step := feature.FindStep(pickleStep.AstNodeIds[0]) + stepDesc := strings.TrimSpace(step.Keyword) + " " + pickleStep.Text + stepLine := fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line) + + failedStepsOutput = append( + failedStepsOutput, + s(2)+red(scenarioDesc)+blackb(" # "+scenarioLine), + s(4)+red(stepDesc)+blackb(" # "+stepLine), + s(6)+red("Error: ")+redb(fmt.Sprintf("%+v", sr.Err)), + "", + ) + } + } + + if len(failedStepsOutput) > 0 { + fmt.Fprintln(f.out, "\n\n--- "+red("Failed steps:")+"\n") + fmt.Fprint(f.out, strings.Join(failedStepsOutput, "\n")) + } + fmt.Fprintln(f.out, "") + + f.Base.Summary() +} + +func (f *Progress) step(pickleStepID string) { + pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStepID) + + switch pickleStepResult.Status { + case passed: + fmt.Fprint(f.out, green(".")) + case skipped: + fmt.Fprint(f.out, cyan("-")) + case failed: + fmt.Fprint(f.out, red("F")) + case undefined: + fmt.Fprint(f.out, yellow("U")) + case ambiguous: + fmt.Fprint(f.out, yellow("A")) + case pending: + fmt.Fprint(f.out, yellow("P")) + } + + *f.Steps++ + + if 
math.Mod(float64(*f.Steps), float64(f.StepsPerRow)) == 0 { + fmt.Fprintf(f.out, " %d\n", *f.Steps) + } +} + +// Passed captures passed step. +func (f *Progress) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Passed(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Skipped captures skipped step. +func (f *Progress) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Skipped(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Undefined captures undefined step. +func (f *Progress) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Undefined(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Failed captures failed step. +func (f *Progress) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Failed(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Ambiguous steps. +func (f *Progress) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Ambiguous(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Pending captures pending step. +func (f *Progress) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Pending(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go b/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go new file mode 100644 index 000000000..ff6cd79ef --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go @@ -0,0 +1,108 @@ +package formatters + +import ( + "fmt" + "reflect" + "regexp" + "strings" + "text/template" + + messages "github.com/cucumber/messages/go/v21" +) + +// some snippet formatting regexps +var snippetExprCleanup = regexp.MustCompile(`([\/\[\]\(\)\\^\$\.\|\?\*\+\'])`) +var snippetExprQuoted = regexp.MustCompile(`(\W|^)"(?:[^"]*)"(\W|$)`) +var snippetMethodName = regexp.MustCompile(`[^a-zA-Z\_\ ]`) +var snippetNumbers = regexp.MustCompile(`(\d+)`) + +var snippetHelperFuncs = template.FuncMap{ + "backticked": func(s string) string { + return "`" + s + "`" + }, +} + +var undefinedSnippetsTpl = template.Must(template.New("snippets").Funcs(snippetHelperFuncs).Parse(` +{{ range . }}func {{ .Method }}({{ .Args }}) error { + return godog.ErrPending +} + +{{end}}func InitializeScenario(ctx *godog.ScenarioContext) { {{ range . 
}} + ctx.Step({{ backticked .Expr }}, {{ .Method }}){{end}} +} +`)) + +type undefinedSnippet struct { + Method string + Expr string + argument *messages.PickleStepArgument +} + +func (s undefinedSnippet) Args() (ret string) { + var ( + args []string + pos int + breakLoop bool + ) + + for !breakLoop { + part := s.Expr[pos:] + ipos := strings.Index(part, "(\\d+)") + spos := strings.Index(part, "\"([^\"]*)\"") + + switch { + case spos == -1 && ipos == -1: + breakLoop = true + case spos == -1: + pos += ipos + len("(\\d+)") + args = append(args, reflect.Int.String()) + case ipos == -1: + pos += spos + len("\"([^\"]*)\"") + args = append(args, reflect.String.String()) + case ipos < spos: + pos += ipos + len("(\\d+)") + args = append(args, reflect.Int.String()) + case spos < ipos: + pos += spos + len("\"([^\"]*)\"") + args = append(args, reflect.String.String()) + } + } + + if s.argument != nil { + if s.argument.DocString != nil { + args = append(args, "*godog.DocString") + } + + if s.argument.DataTable != nil { + args = append(args, "*godog.Table") + } + } + + var last string + + for i, arg := range args { + if last == "" || last == arg { + ret += fmt.Sprintf("arg%d, ", i+1) + } else { + ret = strings.TrimRight(ret, ", ") + fmt.Sprintf(" %s, arg%d, ", last, i+1) + } + + last = arg + } + + return strings.TrimSpace(strings.TrimRight(ret, ", ") + " " + last) +} + +type snippetSortByMethod []undefinedSnippet + +func (s snippetSortByMethod) Len() int { + return len(s) +} + +func (s snippetSortByMethod) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s snippetSortByMethod) Less(i, j int) bool { + return s[i].Method < s[j].Method +} diff --git a/vendor/github.com/cucumber/godog/internal/models/feature.go b/vendor/github.com/cucumber/godog/internal/models/feature.go new file mode 100644 index 000000000..9d9d84da7 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/models/feature.go @@ -0,0 +1,151 @@ +package models + +import ( + messages "github.com/cucumber/messages/go/v21" +) + +// Feature is an internal object to group together +// the parsed gherkin document, the pickles and the +// raw content. +type Feature struct { + *messages.GherkinDocument + Pickles []*messages.Pickle + Content []byte +} + +// FindRule returns the rule to which the given scenario belongs +func (f Feature) FindRule(astScenarioID string) *messages.Rule { + for _, child := range f.GherkinDocument.Feature.Children { + if ru := child.Rule; ru != nil { + if rc := child.Rule; rc != nil { + for _, rcc := range rc.Children { + if sc := rcc.Scenario; sc != nil && sc.Id == astScenarioID { + return ru + } + } + } + } + } + return nil +} + +// FindScenario returns the scenario in the feature or in a rule in the feature +func (f Feature) FindScenario(astScenarioID string) *messages.Scenario { + for _, child := range f.GherkinDocument.Feature.Children { + if sc := child.Scenario; sc != nil && sc.Id == astScenarioID { + return sc + } + if rc := child.Rule; rc != nil { + for _, rcc := range rc.Children { + if sc := rcc.Scenario; sc != nil && sc.Id == astScenarioID { + return sc + } + } + } + } + + return nil +} + +// FindBackground ... 
+func (f Feature) FindBackground(astScenarioID string) *messages.Background { + var bg *messages.Background + + for _, child := range f.GherkinDocument.Feature.Children { + if tmp := child.Background; tmp != nil { + bg = tmp + } + + if sc := child.Scenario; sc != nil && sc.Id == astScenarioID { + return bg + } + + if ru := child.Rule; ru != nil { + for _, rc := range ru.Children { + if tmp := rc.Background; tmp != nil { + bg = tmp + } + + if sc := rc.Scenario; sc != nil && sc.Id == astScenarioID { + return bg + } + } + } + } + + return nil +} + +// FindExample ... +func (f Feature) FindExample(exampleAstID string) (*messages.Examples, *messages.TableRow) { + for _, child := range f.GherkinDocument.Feature.Children { + if sc := child.Scenario; sc != nil { + for _, example := range sc.Examples { + for _, row := range example.TableBody { + if row.Id == exampleAstID { + return example, row + } + } + } + } + if ru := child.Rule; ru != nil { + for _, rc := range ru.Children { + if sc := rc.Scenario; sc != nil { + for _, example := range sc.Examples { + for _, row := range example.TableBody { + if row.Id == exampleAstID { + return example, row + } + } + } + } + } + } + } + + return nil, nil +} + +// FindStep ... +func (f Feature) FindStep(astStepID string) *messages.Step { + for _, child := range f.GherkinDocument.Feature.Children { + + if ru := child.Rule; ru != nil { + for _, ch := range ru.Children { + if sc := ch.Scenario; sc != nil { + for _, step := range sc.Steps { + if step.Id == astStepID { + return step + } + } + } + + if bg := ch.Background; bg != nil { + for _, step := range bg.Steps { + if step.Id == astStepID { + return step + } + } + } + } + } + + if sc := child.Scenario; sc != nil { + for _, step := range sc.Steps { + if step.Id == astStepID { + return step + } + } + } + + if bg := child.Background; bg != nil { + for _, step := range bg.Steps { + if step.Id == astStepID { + return step + } + } + } + } + + return nil +} diff --git a/vendor/github.com/cucumber/godog/internal/models/results.go b/vendor/github.com/cucumber/godog/internal/models/results.go new file mode 100644 index 000000000..9c7f98d7f --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/models/results.go @@ -0,0 +1,111 @@ +package models + +import ( + "time" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/internal/utils" +) + +// TestRunStarted ... +type TestRunStarted struct { + StartedAt time.Time +} + +// PickleResult ... +type PickleResult struct { + PickleID string + StartedAt time.Time +} + +// PickleAttachment ... +type PickleAttachment struct { + Name string + MimeType string + Data []byte +} + +// PickleStepResult ... +type PickleStepResult struct { + Status StepResultStatus + FinishedAt time.Time + Err error + + PickleID string + PickleStepID string + + Def *StepDefinition + + Attachments []PickleAttachment +} + +// NewStepResult ... +func NewStepResult( + status StepResultStatus, + pickleID, pickleStepID string, + match *StepDefinition, + attachments []PickleAttachment, + err error, +) PickleStepResult { + return PickleStepResult{ + Status: status, + FinishedAt: utils.TimeNowFunc(), + Err: err, + PickleID: pickleID, + PickleStepID: pickleStepID, + Def: match, + Attachments: attachments, + } +} + +// StepResultStatus ... +type StepResultStatus int + +const ( + // Passed ... + Passed StepResultStatus = iota + // Failed ... + Failed + // Skipped ... + Skipped + // Undefined ... + Undefined + // Pending ... + Pending + // Ambiguous ... + Ambiguous +) + +// Color ... 
+func (st StepResultStatus) Color() colors.ColorFunc { + switch st { + case Passed: + return colors.Green + case Failed: + return colors.Red + case Skipped: + return colors.Cyan + default: + return colors.Yellow + } +} + +// String ... +func (st StepResultStatus) String() string { + switch st { + case Passed: + return "passed" + case Failed: + return "failed" + case Skipped: + return "skipped" + case Undefined: + return "undefined" + case Pending: + return "pending" + case Ambiguous: + return "ambiguous" + default: + return "unknown" + } +} diff --git a/vendor/github.com/cucumber/godog/internal/models/stepdef.go b/vendor/github.com/cucumber/godog/internal/models/stepdef.go new file mode 100644 index 000000000..7c2e973ac --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/models/stepdef.go @@ -0,0 +1,309 @@ +package models + +import ( + "context" + "errors" + "fmt" + "reflect" + "strconv" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/formatters" +) + +var typeOfBytes = reflect.TypeOf([]byte(nil)) + +// matchable errors +var ( + ErrUnmatchedStepArgumentNumber = errors.New("func expected more arguments than given") + ErrCannotConvert = errors.New("cannot convert argument") + ErrUnsupportedParameterType = errors.New("func has unsupported parameter type") +) + +// StepDefinition ... +type StepDefinition struct { + formatters.StepDefinition + + Args []interface{} + HandlerValue reflect.Value + File string + Line int + + // multistep related + Nested bool + Undefined []string +} + +var typeOfContext = reflect.TypeOf((*context.Context)(nil)).Elem() + +// Run a step with the matched arguments using reflect +// Returns one of ... +// (context, error) +// (context, godog.Steps) +func (sd *StepDefinition) Run(ctx context.Context) (context.Context, interface{}) { + var values []reflect.Value + + typ := sd.HandlerValue.Type() + numIn := typ.NumIn() + hasCtxIn := numIn > 0 && typ.In(0).Implements(typeOfContext) + ctxOffset := 0 + + if hasCtxIn { + values = append(values, reflect.ValueOf(ctx)) + ctxOffset = 1 + numIn-- + } + + if len(sd.Args) < numIn { + return ctx, fmt.Errorf("%w: expected %d arguments, matched %d from step", ErrUnmatchedStepArgumentNumber, numIn, len(sd.Args)) + } + + for i := 0; i < numIn; i++ { + param := typ.In(i + ctxOffset) + switch param.Kind() { + case reflect.Int: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 0) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(int(v))) + case reflect.Int64: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int64: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(v)) + case reflect.Int32: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int32: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(int32(v))) + case reflect.Int16: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 16) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int16: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(int16(v))) + case reflect.Int8: + s, err := sd.shouldBeString(i) + if 
err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 8) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int8: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(int8(v))) + case reflect.Uint: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseUint(s, 10, 0) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to uint: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(uint(v))) + case reflect.Uint64: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to uint64: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(v)) + case reflect.Uint32: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to uint32: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(uint32(v))) + case reflect.Uint16: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseUint(s, 10, 16) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to uint16: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(uint16(v))) + case reflect.Uint8: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseUint(s, 10, 8) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to uint8: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(uint8(v))) + case reflect.String: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + values = append(values, reflect.ValueOf(s)) + case reflect.Float64: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to float64: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(v)) + case reflect.Float32: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseFloat(s, 32) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to float32: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(float32(v))) + case reflect.Ptr: + arg := sd.Args[i] + switch param.Elem().String() { + case "messages.PickleDocString": + if v, ok := arg.(*messages.PickleStepArgument); ok { + values = append(values, reflect.ValueOf(v.DocString)) + break + } + + if v, ok := arg.(*messages.PickleDocString); ok { + values = append(values, reflect.ValueOf(v)) + break + } + + return ctx, fmt.Errorf(`%w %d: "%v" of type "%T" to *messages.PickleDocString`, ErrCannotConvert, i, arg, arg) + case "messages.PickleTable": + if v, ok := arg.(*messages.PickleStepArgument); ok { + values = append(values, reflect.ValueOf(v.DataTable)) + break + } + + if v, ok := arg.(*messages.PickleTable); ok { + values = append(values, reflect.ValueOf(v)) + break + } + + return ctx, fmt.Errorf(`%w %d: "%v" of type "%T" to *messages.PickleTable`, ErrCannotConvert, i, arg, arg) + default: + // the error here is that the declared function has an unsupported param type - really this ought to be trapped at registration ti,e + return ctx, fmt.Errorf("%w: the data type of parameter %d type *%s is not supported", ErrUnsupportedParameterType, i, param.Elem().String()) + } + case 
reflect.Slice: + switch param { + case typeOfBytes: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + values = append(values, reflect.ValueOf([]byte(s))) + default: + // the problem is the function decl is not using a support slice type as the param + return ctx, fmt.Errorf("%w: the slice parameter %d type []%s is not supported", ErrUnsupportedParameterType, i, param.Elem().Kind()) + } + case reflect.Struct: + return ctx, fmt.Errorf("%w: the struct parameter %d type %s is not supported", ErrUnsupportedParameterType, i, param.String()) + default: + return ctx, fmt.Errorf("%w: the parameter %d type %s is not supported", ErrUnsupportedParameterType, i, param.Kind()) + } + } + + res := sd.HandlerValue.Call(values) + if len(res) == 0 { + return ctx, nil + } + + // Note that the step fn return types were validated at Initialise in test_context.go stepWithKeyword() + + // single return value may be one of ... + // error + // context.Context + // godog.Steps + result0 := res[0].Interface() + if len(res) == 1 { + + // if the single return value is a context then just return it + if ctx, ok := result0.(context.Context); ok { + return ctx, nil + } + + // return type is presumably one of nil, "error" or "Steps" so place it into second return position + return ctx, result0 + } + + // multi-value value return must be + // (context, error) and the context value must not be nil + if ctx, ok := result0.(context.Context); ok { + return ctx, res[1].Interface() + } + + result1 := res[1].Interface() + errMsg := "" + if result1 != nil { + errMsg = fmt.Sprintf(", step def also returned an error: %v", result1) + } + + text := sd.StepDefinition.Expr.String() + + if result0 == nil { + panic(fmt.Sprintf("step definition '%v' with return type (context.Context, error) must not return for the context.Context value%s", text, errMsg)) + } + + panic(fmt.Errorf("step definition '%v' has return type (context.Context, error), but found %v rather than a context.Context value%s", text, result0, errMsg)) +} + +func (sd *StepDefinition) shouldBeString(idx int) (string, error) { + arg := sd.Args[idx] + switch arg := arg.(type) { + case string: + return arg, nil + case *messages.PickleStepArgument: + if arg.DocString == nil { + return "", fmt.Errorf(`%w %d: "%v" of type "%T": DocString is not set`, ErrCannotConvert, idx, arg, arg) + } + return arg.DocString.Content, nil + case *messages.PickleDocString: + return arg.Content, nil + default: + return "", fmt.Errorf(`%w %d: "%v" of type "%T" to string`, ErrCannotConvert, idx, arg, arg) + } +} + +// GetInternalStepDefinition ... +func (sd *StepDefinition) GetInternalStepDefinition() *formatters.StepDefinition { + if sd == nil { + return nil + } + + return &sd.StepDefinition +} diff --git a/vendor/github.com/cucumber/godog/internal/parser/parser.go b/vendor/github.com/cucumber/godog/internal/parser/parser.go new file mode 100644 index 000000000..f607000aa --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/parser/parser.go @@ -0,0 +1,243 @@ +package parser + +import ( + "bytes" + "fmt" + "io" + "io/fs" + "os" + "regexp" + "strconv" + "strings" + + gherkin "github.com/cucumber/gherkin/go/v26" + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/internal/flags" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/tags" +) + +var pathLineRe = regexp.MustCompile(`:([\d]+)$`) + +// ExtractFeaturePathLine ... 
+func ExtractFeaturePathLine(p string) (string, int) { + line := -1 + retPath := p + if m := pathLineRe.FindStringSubmatch(p); len(m) > 0 { + if i, err := strconv.Atoi(m[1]); err == nil { + line = i + retPath = p[:strings.LastIndexByte(p, ':')] + } + } + return retPath, line +} + +func parseFeatureFile(fsys fs.FS, path, dialect string, newIDFunc func() string) (*models.Feature, error) { + reader, err := fsys.Open(path) + if err != nil { + return nil, err + } + + defer reader.Close() + + var buf bytes.Buffer + gherkinDocument, err := gherkin.ParseGherkinDocumentForLanguage(io.TeeReader(reader, &buf), dialect, newIDFunc) + if err != nil { + return nil, fmt.Errorf("%s - %v", path, err) + } + + gherkinDocument.Uri = path + pickles := gherkin.Pickles(*gherkinDocument, path, newIDFunc) + + f := models.Feature{GherkinDocument: gherkinDocument, Pickles: pickles, Content: buf.Bytes()} + return &f, nil +} + +func parseBytes(path string, feature []byte, dialect string, newIDFunc func() string) (*models.Feature, error) { + reader := bytes.NewReader(feature) + + var buf bytes.Buffer + gherkinDocument, err := gherkin.ParseGherkinDocumentForLanguage(io.TeeReader(reader, &buf), dialect, newIDFunc) + if err != nil { + return nil, fmt.Errorf("%s - %v", path, err) + } + + gherkinDocument.Uri = path + pickles := gherkin.Pickles(*gherkinDocument, path, newIDFunc) + + f := models.Feature{GherkinDocument: gherkinDocument, Pickles: pickles, Content: buf.Bytes()} + return &f, nil +} + +func parseFeatureDir(fsys fs.FS, dir, dialect string, newIDFunc func() string) ([]*models.Feature, error) { + var features []*models.Feature + return features, fs.WalkDir(fsys, dir, func(p string, f fs.DirEntry, err error) error { + if err != nil { + return err + } + + if f.IsDir() { + return nil + } + + if !strings.HasSuffix(p, ".feature") { + return nil + } + + feat, err := parseFeatureFile(fsys, p, dialect, newIDFunc) + if err != nil { + return err + } + + features = append(features, feat) + return nil + }) +} + +func parsePath(fsys fs.FS, path, dialect string, newIDFunc func() string) ([]*models.Feature, error) { + var features []*models.Feature + + path, line := ExtractFeaturePathLine(path) + + fi, err := func() (fs.FileInfo, error) { + file, err := fsys.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + return file.Stat() + }() + if err != nil { + return features, err + } + + if fi.IsDir() { + return parseFeatureDir(fsys, path, dialect, newIDFunc) + } + + ft, err := parseFeatureFile(fsys, path, dialect, newIDFunc) + if err != nil { + return features, err + } + + // filter scenario by line number + var pickles []*messages.Pickle + + if line != -1 { + ft.Uri += ":" + strconv.Itoa(line) + } + + for _, pickle := range ft.Pickles { + sc := ft.FindScenario(pickle.AstNodeIds[0]) + + if line == -1 || int64(line) == sc.Location.Line { + if line != -1 { + pickle.Uri += ":" + strconv.Itoa(line) + } + + pickles = append(pickles, pickle) + } + } + ft.Pickles = pickles + + return append(features, ft), nil +} + +// ParseFeatures ... 
+func ParseFeatures(fsys fs.FS, filter, dialect string, paths []string) ([]*models.Feature, error) { + var order int + + if dialect == "" { + dialect = gherkin.DefaultDialect + } + + featureIdxs := make(map[string]int) + uniqueFeatureURI := make(map[string]*models.Feature) + newIDFunc := (&messages.Incrementing{}).NewId + for _, path := range paths { + feats, err := parsePath(fsys, path, dialect, newIDFunc) + + switch { + case os.IsNotExist(err): + return nil, fmt.Errorf(`feature path "%s" is not available`, path) + case os.IsPermission(err): + return nil, fmt.Errorf(`feature path "%s" is not accessible`, path) + case err != nil: + return nil, err + } + + for _, ft := range feats { + if _, duplicate := uniqueFeatureURI[ft.Uri]; duplicate { + continue + } + + uniqueFeatureURI[ft.Uri] = ft + featureIdxs[ft.Uri] = order + + order++ + } + } + + var features = make([]*models.Feature, len(uniqueFeatureURI)) + for uri, feature := range uniqueFeatureURI { + idx := featureIdxs[uri] + features[idx] = feature + } + + features = filterFeatures(filter, features) + + return features, nil +} + +type FeatureContent = flags.Feature + +func ParseFromBytes(filter, dialect string, featuresInputs []FeatureContent) ([]*models.Feature, error) { + var order int + + if dialect == "" { + dialect = gherkin.DefaultDialect + } + + featureIdxs := make(map[string]int) + uniqueFeatureURI := make(map[string]*models.Feature) + newIDFunc := (&messages.Incrementing{}).NewId + for _, f := range featuresInputs { + ft, err := parseBytes(f.Name, f.Contents, dialect, newIDFunc) + if err != nil { + return nil, err + } + + if _, duplicate := uniqueFeatureURI[ft.Uri]; duplicate { + continue + } + + uniqueFeatureURI[ft.Uri] = ft + featureIdxs[ft.Uri] = order + + order++ + } + + var features = make([]*models.Feature, len(uniqueFeatureURI)) + for uri, feature := range uniqueFeatureURI { + idx := featureIdxs[uri] + features[idx] = feature + } + + features = filterFeatures(filter, features) + + return features, nil +} + +func filterFeatures(filter string, features []*models.Feature) (result []*models.Feature) { + for _, ft := range features { + ft.Pickles = tags.ApplyTagFilter(filter, ft.Pickles) + + if ft.Feature != nil && len(ft.Pickles) > 0 { + result = append(result, ft) + } + } + + return +} diff --git a/vendor/github.com/cucumber/godog/internal/storage/fs.go b/vendor/github.com/cucumber/godog/internal/storage/fs.go new file mode 100644 index 000000000..333c61def --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/storage/fs.go @@ -0,0 +1,21 @@ +package storage + +import ( + "io/fs" + "os" +) + +// FS is a wrapper that falls back to `os`. +type FS struct { + FS fs.FS +} + +// Open a file in the provided `fs.FS`. 
If none provided, +// open via `os.Open` +func (f FS) Open(name string) (fs.File, error) { + if f.FS == nil { + return os.Open(name) + } + + return f.FS.Open(name) +} diff --git a/vendor/github.com/cucumber/godog/internal/storage/storage.go b/vendor/github.com/cucumber/godog/internal/storage/storage.go new file mode 100644 index 000000000..72b7e86f7 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/storage/storage.go @@ -0,0 +1,338 @@ +package storage + +import ( + "fmt" + "sync" + + messages "github.com/cucumber/messages/go/v21" + "github.com/hashicorp/go-memdb" + + "github.com/cucumber/godog/internal/models" +) + +const ( + writeMode bool = true + readMode bool = false + + tableFeature string = "feature" + tableFeatureIndexURI string = "id" + + tablePickle string = "pickle" + tablePickleIndexID string = "id" + tablePickleIndexURI string = "uri" + + tablePickleStep string = "pickle_step" + tablePickleStepIndexID string = "id" + + tablePickleResult string = "pickle_result" + tablePickleResultIndexPickleID string = "id" + + tablePickleStepResult string = "pickle_step_result" + tablePickleStepResultIndexPickleStepID string = "id" + tablePickleStepResultIndexPickleID string = "pickle_id" + tablePickleStepResultIndexStatus string = "status" + + tableStepDefintionMatch string = "step_defintion_match" + tableStepDefintionMatchIndexStepID string = "id" +) + +// Storage is a thread safe in-mem storage +type Storage struct { + db *memdb.MemDB + + testRunStarted models.TestRunStarted + testRunStartedLock *sync.Mutex +} + +// NewStorage will create an in-mem storage that +// is used across concurrent runners and formatters +func NewStorage() *Storage { + schema := memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + tableFeature: { + Name: tableFeature, + Indexes: map[string]*memdb.IndexSchema{ + tableFeatureIndexURI: { + Name: tableFeatureIndexURI, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Uri"}, + }, + }, + }, + tablePickle: { + Name: tablePickle, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleIndexID: { + Name: tablePickleIndexID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Id"}, + }, + tablePickleIndexURI: { + Name: tablePickleIndexURI, + Unique: false, + Indexer: &memdb.StringFieldIndex{Field: "Uri"}, + }, + }, + }, + tablePickleStep: { + Name: tablePickleStep, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleStepIndexID: { + Name: tablePickleStepIndexID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Id"}, + }, + }, + }, + tablePickleResult: { + Name: tablePickleResult, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleResultIndexPickleID: { + Name: tablePickleResultIndexPickleID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "PickleID"}, + }, + }, + }, + tablePickleStepResult: { + Name: tablePickleStepResult, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleStepResultIndexPickleStepID: { + Name: tablePickleStepResultIndexPickleStepID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "PickleStepID"}, + }, + tablePickleStepResultIndexPickleID: { + Name: tablePickleStepResultIndexPickleID, + Unique: false, + Indexer: &memdb.StringFieldIndex{Field: "PickleID"}, + }, + tablePickleStepResultIndexStatus: { + Name: tablePickleStepResultIndexStatus, + Unique: false, + Indexer: &memdb.IntFieldIndex{Field: "Status"}, + }, + }, + }, + tableStepDefintionMatch: { + Name: tableStepDefintionMatch, + Indexes: map[string]*memdb.IndexSchema{ + tableStepDefintionMatchIndexStepID: { + Name: 
tableStepDefintionMatchIndexStepID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "StepID"}, + }, + }, + }, + }, + } + + db, err := memdb.NewMemDB(&schema) + if err != nil { + panic(err) + } + + return &Storage{db: db, testRunStartedLock: new(sync.Mutex)} +} + +// MustInsertPickle will insert a pickle and it's steps, +// will panic on error. +func (s *Storage) MustInsertPickle(p *messages.Pickle) { + txn := s.db.Txn(writeMode) + + if err := txn.Insert(tablePickle, p); err != nil { + panic(err) + } + + for _, step := range p.Steps { + if err := txn.Insert(tablePickleStep, step); err != nil { + panic(err) + } + } + + txn.Commit() +} + +// MustGetPickle will retrieve a pickle by id and panic on error. +func (s *Storage) MustGetPickle(id string) *messages.Pickle { + v := s.mustFirst(tablePickle, tablePickleIndexID, id) + return v.(*messages.Pickle) +} + +// MustGetPickles will retrieve pickles by URI and panic on error. +func (s *Storage) MustGetPickles(uri string) (ps []*messages.Pickle) { + it := s.mustGet(tablePickle, tablePickleIndexURI, uri) + for v := it.Next(); v != nil; v = it.Next() { + ps = append(ps, v.(*messages.Pickle)) + } + + return +} + +// MustGetPickleStep will retrieve a pickle step and panic on error. +func (s *Storage) MustGetPickleStep(id string) *messages.PickleStep { + v := s.mustFirst(tablePickleStep, tablePickleStepIndexID, id) + return v.(*messages.PickleStep) +} + +// MustInsertTestRunStarted will set the test run started event and panic on error. +func (s *Storage) MustInsertTestRunStarted(trs models.TestRunStarted) { + s.testRunStartedLock.Lock() + defer s.testRunStartedLock.Unlock() + + s.testRunStarted = trs +} + +// MustGetTestRunStarted will retrieve the test run started event and panic on error. +func (s *Storage) MustGetTestRunStarted() models.TestRunStarted { + s.testRunStartedLock.Lock() + defer s.testRunStartedLock.Unlock() + + return s.testRunStarted +} + +// MustInsertPickleResult will instert a pickle result and panic on error. +func (s *Storage) MustInsertPickleResult(pr models.PickleResult) { + s.mustInsert(tablePickleResult, pr) +} + +// MustInsertPickleStepResult will insert a pickle step result and panic on error. +func (s *Storage) MustInsertPickleStepResult(psr models.PickleStepResult) { + s.mustInsert(tablePickleStepResult, psr) +} + +// MustGetPickleResult will retrieve a pickle result by id and panic on error. +func (s *Storage) MustGetPickleResult(id string) models.PickleResult { + v := s.mustFirst(tablePickleResult, tablePickleResultIndexPickleID, id) + return v.(models.PickleResult) +} + +// MustGetPickleResults will retrieve all pickle results and panic on error. +func (s *Storage) MustGetPickleResults() (prs []models.PickleResult) { + it := s.mustGet(tablePickleResult, tablePickleResultIndexPickleID) + for v := it.Next(); v != nil; v = it.Next() { + prs = append(prs, v.(models.PickleResult)) + } + + return prs +} + +// MustGetPickleStepResult will retrieve a pickle strep result by id and panic on error. +func (s *Storage) MustGetPickleStepResult(id string) models.PickleStepResult { + v := s.mustFirst(tablePickleStepResult, tablePickleStepResultIndexPickleStepID, id) + return v.(models.PickleStepResult) +} + +// MustGetPickleStepResultsByPickleID will retrieve pickle step results by pickle id and panic on error. 
+func (s *Storage) MustGetPickleStepResultsByPickleID(pickleID string) (psrs []models.PickleStepResult) { + it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexPickleID, pickleID) + for v := it.Next(); v != nil; v = it.Next() { + psrs = append(psrs, v.(models.PickleStepResult)) + } + + return psrs +} + +// MustGetPickleStepResultsByPickleIDUntilStep will retrieve pickle step results by pickle id +// from 0..stepID for that pickle. +func (s *Storage) MustGetPickleStepResultsByPickleIDUntilStep(pickleID string, untilStepID string) (psrs []models.PickleStepResult) { + it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexPickleID, pickleID) + for v := it.Next(); v != nil; v = it.Next() { + psr := v.(models.PickleStepResult) + psrs = append(psrs, psr) + if psr.PickleStepID == untilStepID { + break + } + } + + return psrs +} + +// MustGetPickleStepResultsByStatus will retrieve pickle strep results by status and panic on error. +func (s *Storage) MustGetPickleStepResultsByStatus(status models.StepResultStatus) (psrs []models.PickleStepResult) { + it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexStatus, status) + for v := it.Next(); v != nil; v = it.Next() { + psrs = append(psrs, v.(models.PickleStepResult)) + } + + return psrs +} + +// MustInsertFeature will insert a feature and panic on error. +func (s *Storage) MustInsertFeature(f *models.Feature) { + s.mustInsert(tableFeature, f) +} + +// MustGetFeature will retrieve a feature by URI and panic on error. +func (s *Storage) MustGetFeature(uri string) *models.Feature { + v := s.mustFirst(tableFeature, tableFeatureIndexURI, uri) + return v.(*models.Feature) +} + +// MustGetFeatures will retrieve all features by and panic on error. +func (s *Storage) MustGetFeatures() (fs []*models.Feature) { + it := s.mustGet(tableFeature, tableFeatureIndexURI) + for v := it.Next(); v != nil; v = it.Next() { + fs = append(fs, v.(*models.Feature)) + } + + return +} + +type stepDefinitionMatch struct { + StepID string + StepDefinition *models.StepDefinition +} + +// MustInsertStepDefintionMatch will insert the matched StepDefintion for the step ID and panic on error. +func (s *Storage) MustInsertStepDefintionMatch(stepID string, match *models.StepDefinition) { + d := stepDefinitionMatch{ + StepID: stepID, + StepDefinition: match, + } + + s.mustInsert(tableStepDefintionMatch, d) +} + +// MustGetStepDefintionMatch will retrieve the matched StepDefintion for the step ID and panic on error. +func (s *Storage) MustGetStepDefintionMatch(stepID string) *models.StepDefinition { + v := s.mustFirst(tableStepDefintionMatch, tableStepDefintionMatchIndexStepID, stepID) + return v.(stepDefinitionMatch).StepDefinition +} + +func (s *Storage) mustInsert(table string, obj interface{}) { + txn := s.db.Txn(writeMode) + + if err := txn.Insert(table, obj); err != nil { + panic(err) + } + + txn.Commit() +} + +func (s *Storage) mustFirst(table, index string, args ...interface{}) interface{} { + txn := s.db.Txn(readMode) + defer txn.Abort() + + v, err := txn.First(table, index, args...) + if err != nil { + panic(err) + } else if v == nil { + err = fmt.Errorf("couldn't find index: %q in table: %q with args: %+v", index, table, args) + panic(err) + } + + return v +} + +func (s *Storage) mustGet(table, index string, args ...interface{}) memdb.ResultIterator { + txn := s.db.Txn(readMode) + defer txn.Abort() + + it, err := txn.Get(table, index, args...) 
+ if err != nil { + panic(err) + } + + return it +} diff --git a/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go b/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go new file mode 100644 index 000000000..72b4512b4 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go @@ -0,0 +1,62 @@ +package tags + +import ( + "strings" + + messages "github.com/cucumber/messages/go/v21" +) + +// ApplyTagFilter will apply a filter string on the +// array of pickles and returned the filtered list. +func ApplyTagFilter(filter string, pickles []*messages.Pickle) []*messages.Pickle { + if filter == "" { + return pickles + } + + var result = []*messages.Pickle{} + + for _, pickle := range pickles { + if match(filter, pickle.Tags) { + result = append(result, pickle) + } + } + + return result +} + +// Based on http://behat.readthedocs.org/en/v2.5/guides/6.cli.html#gherkin-filters +func match(filter string, tags []*messages.PickleTag) (ok bool) { + ok = true + + for _, andTags := range strings.Split(filter, "&&") { + var okComma bool + + for _, tag := range strings.Split(andTags, ",") { + tag = strings.TrimSpace(tag) + tag = strings.Replace(tag, "@", "", -1) + + okComma = contains(tags, tag) || okComma + + if tag[0] == '~' { + tag = tag[1:] + okComma = !contains(tags, tag) || okComma + } + } + + ok = ok && okComma + } + + return +} + +func contains(tags []*messages.PickleTag, tag string) bool { + for _, t := range tags { + tagName := strings.Replace(t.Name, "@", "", -1) + + if tagName == tag { + return true + } + } + + return false +} diff --git a/vendor/github.com/cucumber/godog/internal/utils/utils.go b/vendor/github.com/cucumber/godog/internal/utils/utils.go new file mode 100644 index 000000000..f1ec21f95 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/utils/utils.go @@ -0,0 +1,21 @@ +package utils + +import ( + "strings" + "time" +) + +// S repeats a space n times +func S(n int) string { + if n < 0 { + n = 1 + } + return strings.Repeat(" ", n) +} + +// TimeNowFunc is a utility function to simply testing +// by allowing TimeNowFunc to be defined to zero time +// to remove the time domain from tests +var TimeNowFunc = func() time.Time { + return time.Now() +} diff --git a/vendor/github.com/cucumber/godog/logo.png b/vendor/github.com/cucumber/godog/logo.png new file mode 100644 index 000000000..70e6c7aa8 Binary files /dev/null and b/vendor/github.com/cucumber/godog/logo.png differ diff --git a/vendor/github.com/cucumber/godog/logo.svg b/vendor/github.com/cucumber/godog/logo.svg new file mode 100644 index 000000000..bfda7fdb1 --- /dev/null +++ b/vendor/github.com/cucumber/godog/logo.svg @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/cucumber/godog/mod_version.go b/vendor/github.com/cucumber/godog/mod_version.go new file mode 100644 index 000000000..c915e1627 --- /dev/null +++ b/vendor/github.com/cucumber/godog/mod_version.go @@ -0,0 +1,16 @@ +//go:build go1.12 +// +build go1.12 + +package godog + +import ( + "runtime/debug" +) + +func init() { + if info, available := debug.ReadBuildInfo(); available { + if Version == "v0.0.0-dev" && info.Main.Version != "(devel)" { + Version = info.Main.Version + } + } +} diff --git a/vendor/github.com/cucumber/godog/options.go b/vendor/github.com/cucumber/godog/options.go new file mode 100644 index 000000000..2b32cfd8f --- /dev/null +++ b/vendor/github.com/cucumber/godog/options.go @@ -0,0 +1,12 @@ +package godog + +import 
"github.com/cucumber/godog/internal/flags" + +// Options are suite run options +// flags are mapped to these options. +// +// It can also be used together with godog.RunWithOptions +// to run test suite from go source directly +// +// See the flags for more details +type Options = flags.Options diff --git a/vendor/github.com/cucumber/godog/run.go b/vendor/github.com/cucumber/godog/run.go new file mode 100644 index 000000000..1231d0286 --- /dev/null +++ b/vendor/github.com/cucumber/godog/run.go @@ -0,0 +1,409 @@ +package godog + +import ( + "context" + "flag" + "fmt" + "go/build" + "io" + "io/fs" + "math/rand" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "testing" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + ifmt "github.com/cucumber/godog/internal/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/parser" + "github.com/cucumber/godog/internal/storage" + "github.com/cucumber/godog/internal/utils" +) + +const ( + exitSuccess int = iota + exitFailure + exitOptionError +) + +type ( + testSuiteInitializer func(*TestSuiteContext) + scenarioInitializer func(*ScenarioContext) +) + +type runner struct { + randomSeed int64 + stopOnFailure, strict bool + + defaultContext context.Context + testingT *testing.T + + features []*models.Feature + + testSuiteInitializer testSuiteInitializer + scenarioInitializer scenarioInitializer + + storage *storage.Storage + fmt Formatter +} + +func (r *runner) concurrent(rate int) (failed bool) { + var copyLock sync.Mutex + + if fmt, ok := r.fmt.(storageFormatter); ok { + fmt.SetStorage(r.storage) + } + + testSuiteContext := TestSuiteContext{ + suite: &suite{ + fmt: r.fmt, + randomSeed: r.randomSeed, + strict: r.strict, + storage: r.storage, + defaultContext: r.defaultContext, + testingT: r.testingT, + }, + } + if r.testSuiteInitializer != nil { + r.testSuiteInitializer(&testSuiteContext) + } + + testRunStarted := models.TestRunStarted{StartedAt: utils.TimeNowFunc()} + r.storage.MustInsertTestRunStarted(testRunStarted) + r.fmt.TestRunStarted() + + // run before suite handlers + for _, f := range testSuiteContext.beforeSuiteHandlers { + f() + } + + queue := make(chan int, rate) + for _, ft := range r.features { + pickles := make([]*messages.Pickle, len(ft.Pickles)) + if r.randomSeed != 0 { + r := rand.New(rand.NewSource(r.randomSeed)) + perm := r.Perm(len(ft.Pickles)) + for i, v := range perm { + pickles[v] = ft.Pickles[i] + } + } else { + copy(pickles, ft.Pickles) + } + + for i, p := range pickles { + pickle := *p + + queue <- i // reserve space in queue + + if i == 0 { + r.fmt.Feature(ft.GherkinDocument, ft.Uri, ft.Content) + } + + runPickle := func(fail *bool, pickle *messages.Pickle) { + defer func() { + <-queue // free a space in queue + }() + + if r.stopOnFailure && *fail { + return + } + + // Copy base suite. 
+ suite := *testSuiteContext.suite + if rate > 1 { + // if running concurrently, only print at end of scenario to keep + // scenario logs segregated + ffmt := ifmt.WrapOnFlush(testSuiteContext.suite.fmt) + suite.fmt = ffmt + defer ffmt.Flush() + } + + if r.scenarioInitializer != nil { + sc := ScenarioContext{suite: &suite} + r.scenarioInitializer(&sc) + } + + err := suite.runPickle(pickle) + if suite.shouldFail(err) { + copyLock.Lock() + *fail = true + copyLock.Unlock() + } + } + + if rate == 1 { + // Running within the same goroutine for concurrency 1 + // to preserve original stacks and simplify debugging. + runPickle(&failed, &pickle) + } else { + go runPickle(&failed, &pickle) + } + } + } + + // wait until last are processed + for i := 0; i < rate; i++ { + queue <- i + } + + close(queue) + + // run after suite handlers + for _, f := range testSuiteContext.afterSuiteHandlers { + f() + } + + // print summary + r.fmt.Summary() + return +} + +func runWithOptions(suiteName string, runner runner, opt Options) int { + var output io.Writer = os.Stdout + if nil != opt.Output { + output = opt.Output + } + + multiFmt := ifmt.MultiFormatter{} + + for _, formatter := range strings.Split(opt.Format, ",") { + out := output + formatterParts := strings.SplitN(formatter, ":", 2) + + if len(formatterParts) > 1 { + f, err := os.Create(formatterParts[1]) + if err != nil { + err = fmt.Errorf( + `couldn't create file with name: "%s", error: %s`, + formatterParts[1], err.Error(), + ) + fmt.Fprintln(os.Stderr, err) + + return exitOptionError + } + + defer f.Close() + + out = f + } + + if opt.NoColors { + out = colors.Uncolored(out) + } else { + out = colors.Colored(out) + } + + if nil == formatters.FindFmt(formatterParts[0]) { + var names []string + for name := range formatters.AvailableFormatters() { + names = append(names, name) + } + fmt.Fprintln(os.Stderr, fmt.Errorf( + `unregistered formatter name: "%s", use one of: %s`, + opt.Format, + strings.Join(names, ", "), + )) + return exitOptionError + } + + multiFmt.Add(formatterParts[0], out) + } + + if opt.ShowStepDefinitions { + s := suite{} + sc := ScenarioContext{suite: &s} + runner.scenarioInitializer(&sc) + printStepDefinitions(s.steps, output) + return exitOptionError + } + + if len(opt.Paths) == 0 && len(opt.FeatureContents) == 0 { + inf, err := func() (fs.FileInfo, error) { + file, err := opt.FS.Open("features") + if err != nil { + return nil, err + } + defer file.Close() + + return file.Stat() + }() + if err == nil && inf.IsDir() { + opt.Paths = []string{"features"} + } + } + + if opt.Concurrency < 1 { + opt.Concurrency = 1 + } + + runner.fmt = multiFmt.FormatterFunc(suiteName, output) + opt.FS = storage.FS{FS: opt.FS} + + if len(opt.FeatureContents) > 0 { + features, err := parser.ParseFromBytes(opt.Tags, opt.Dialect, opt.FeatureContents) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return exitOptionError + } + runner.features = append(runner.features, features...) + } + + if len(opt.Paths) > 0 { + features, err := parser.ParseFeatures(opt.FS, opt.Tags, opt.Dialect, opt.Paths) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return exitOptionError + } + runner.features = append(runner.features, features...) 
+ } + + runner.storage = storage.NewStorage() + for _, feat := range runner.features { + runner.storage.MustInsertFeature(feat) + + for _, pickle := range feat.Pickles { + runner.storage.MustInsertPickle(pickle) + } + } + + // user may have specified -1 option to create random seed + runner.randomSeed = opt.Randomize + if runner.randomSeed == -1 { + runner.randomSeed = makeRandomSeed() + } + + runner.stopOnFailure = opt.StopOnFailure + runner.strict = opt.Strict + runner.defaultContext = opt.DefaultContext + runner.testingT = opt.TestingT + + // store chosen seed in environment, so it could be seen in formatter summary report + os.Setenv("GODOG_SEED", strconv.FormatInt(runner.randomSeed, 10)) + // determine tested package + _, filename, _, _ := runtime.Caller(1) + os.Setenv("GODOG_TESTED_PACKAGE", runsFromPackage(filename)) + + failed := runner.concurrent(opt.Concurrency) + + // @TODO: should prevent from having these + os.Setenv("GODOG_SEED", "") + os.Setenv("GODOG_TESTED_PACKAGE", "") + if failed && opt.Format != "events" { + return exitFailure + } + return exitSuccess +} + +func runsFromPackage(fp string) string { + dir := filepath.Dir(fp) + + gopaths := filepath.SplitList(build.Default.GOPATH) + for _, gp := range gopaths { + gp = filepath.Join(gp, "src") + if strings.Index(dir, gp) == 0 { + return strings.TrimLeft(strings.Replace(dir, gp, "", 1), string(filepath.Separator)) + } + } + return dir +} + +// TestSuite allows for configuration +// of the Test Suite Execution +type TestSuite struct { + Name string + TestSuiteInitializer func(*TestSuiteContext) + ScenarioInitializer func(*ScenarioContext) + Options *Options +} + +// Run will execute the test suite. +// +// If options are not set, it will reads +// all configuration options from flags. 
+// +// The exit codes may vary from: +// +// 0 - success +// 1 - failed +// 2 - command line usage error +// 128 - or higher, os signal related error exit codes +// +// If there are flag related errors they will be directed to os.Stderr +func (ts TestSuite) Run() int { + if ts.Options == nil { + var err error + ts.Options, err = getDefaultOptions() + if err != nil { + return exitOptionError + } + } + if ts.Options.FS == nil { + ts.Options.FS = storage.FS{} + } + if ts.Options.ShowHelp { + flag.CommandLine.Usage() + + return 0 + } + + r := runner{testSuiteInitializer: ts.TestSuiteInitializer, scenarioInitializer: ts.ScenarioInitializer} + return runWithOptions(ts.Name, r, *ts.Options) +} + +// RetrieveFeatures will parse and return the features based on test suite option +// Any modification on the parsed features will not have any impact on the next Run of the Test Suite +func (ts TestSuite) RetrieveFeatures() ([]*models.Feature, error) { + opt := ts.Options + + if opt == nil { + var err error + opt, err = getDefaultOptions() + if err != nil { + return nil, err + } + } + + if ts.Options.FS == nil { + ts.Options.FS = storage.FS{} + } + + if len(opt.Paths) == 0 { + inf, err := func() (fs.FileInfo, error) { + file, err := opt.FS.Open("features") + if err != nil { + return nil, err + } + defer file.Close() + + return file.Stat() + }() + if err == nil && inf.IsDir() { + opt.Paths = []string{"features"} + } + } + + return parser.ParseFeatures(opt.FS, opt.Tags, opt.Dialect, opt.Paths) +} + +func getDefaultOptions() (*Options, error) { + opt := &Options{} + opt.Output = colors.Colored(os.Stdout) + + flagSet := flagSet(opt) + if err := flagSet.Parse(os.Args[1:]); err != nil { + fmt.Fprintln(os.Stderr, err) + return nil, err + } + + opt.Paths = flagSet.Args() + opt.FS = storage.FS{} + + return opt, nil +} diff --git a/vendor/github.com/cucumber/godog/stacktrace.go b/vendor/github.com/cucumber/godog/stacktrace.go new file mode 100644 index 000000000..686c6b09b --- /dev/null +++ b/vendor/github.com/cucumber/godog/stacktrace.go @@ -0,0 +1,141 @@ +package godog + +import ( + "fmt" + "go/build" + "io" + "path" + "path/filepath" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type stackFrame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f stackFrame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f stackFrame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +func trimGoPath(file string) string { + for _, p := range filepath.SplitList(build.Default.GOPATH) { + file = strings.Replace(file, filepath.Join(p, "src")+string(filepath.Separator), "", 1) + } + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f stackFrame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. 
+// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f stackFrame) Format(s fmt.State, verb rune) { + funcname := func(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] + } + + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), trimGoPath(file)) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := stackFrame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func callStack() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// fundamental is an error that has a message and a stack, but no caller. +type traceError struct { + msg string + *stack +} + +func (f *traceError) Error() string { return f.msg } + +func (f *traceError) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} diff --git a/vendor/github.com/cucumber/godog/suite.go b/vendor/github.com/cucumber/godog/suite.go new file mode 100644 index 000000000..6ca1bf53d --- /dev/null +++ b/vendor/github.com/cucumber/godog/suite.go @@ -0,0 +1,651 @@ +package godog + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "testing" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/storage" + "github.com/cucumber/godog/internal/utils" +) + +var ( + errorInterface = reflect.TypeOf((*error)(nil)).Elem() + contextInterface = reflect.TypeOf((*context.Context)(nil)).Elem() +) + +// more than one regex matched the step text +var ErrAmbiguous = fmt.Errorf("ambiguous step definition") + +// ErrUndefined is returned in case if step definition was not found +var ErrUndefined = fmt.Errorf("step is undefined") + +// ErrPending should be returned by step definition if +// step implementation is pending +var ErrPending = fmt.Errorf("step implementation is pending") + +// ErrSkip should be returned by step definition or a hook if scenario and further steps are to be skipped. +var ErrSkip = fmt.Errorf("skipped") + +// StepResultStatus describes step result. +type StepResultStatus = models.StepResultStatus + +const ( + // StepPassed indicates step that passed. + StepPassed StepResultStatus = models.Passed + // StepFailed indicates step that failed. + StepFailed = models.Failed + // StepSkipped indicates step that was skipped. + StepSkipped = models.Skipped + // StepUndefined indicates undefined step. 
+ StepUndefined = models.Undefined + // StepPending indicates step with pending implementation. + StepPending = models.Pending + // StepAmbiguous indicates step text matches more than one step def + StepAmbiguous = models.Ambiguous +) + +type suite struct { + steps []*models.StepDefinition + + fmt Formatter + storage *storage.Storage + + failed bool + randomSeed int64 + stopOnFailure bool + strict bool + + defaultContext context.Context + testingT *testing.T + + // suite event handlers + beforeScenarioHandlers []BeforeScenarioHook + beforeStepHandlers []BeforeStepHook + afterStepHandlers []AfterStepHook + afterScenarioHandlers []AfterScenarioHook +} + +type Attachment struct { + Body []byte + FileName string + MediaType string +} + +type attachmentKey struct{} + +func Attach(ctx context.Context, attachments ...Attachment) context.Context { + existing := Attachments(ctx) + updated := append(existing, attachments...) + return context.WithValue(ctx, attachmentKey{}, updated) +} + +func Attachments(ctx context.Context) []Attachment { + v := ctx.Value(attachmentKey{}) + + if v == nil { + return []Attachment{} + } + return v.([]Attachment) +} + +func clearAttach(ctx context.Context) context.Context { + return context.WithValue(ctx, attachmentKey{}, nil) +} + +func pickleAttachments(ctx context.Context) []models.PickleAttachment { + + pickledAttachments := []models.PickleAttachment{} + attachments := Attachments(ctx) + + for _, a := range attachments { + pickledAttachments = append(pickledAttachments, models.PickleAttachment{ + Name: a.FileName, + Data: a.Body, + MimeType: a.MediaType, + }) + } + + return pickledAttachments +} + +func (s *suite) matchStep(step *messages.PickleStep) (*models.StepDefinition, error) { + def, err := s.matchStepTextAndType(step.Text, step.Type) + if err != nil { + return nil, err + } + + if def != nil && step.Argument != nil { + def.Args = append(def.Args, step.Argument) + } + return def, nil +} + +func (s *suite) runStep(ctx context.Context, pickle *Scenario, step *Step, scenarioErr error, isFirst, isLast bool) (rctx context.Context, err error) { + var match *models.StepDefinition + + rctx = ctx + + // user multistep definitions may panic + defer func() { + if e := recover(); e != nil { + pe, isErr := e.(error) + switch { + case isErr && errors.Is(pe, errStopNow): + // FailNow or SkipNow called on dogTestingT, so clear the error to let the normal + // below getTestingT(ctx).isFailed() call handle the reasons. + err = nil + case err != nil: + err = &traceError{ + msg: fmt.Sprintf("%s: %v", err.Error(), e), + stack: callStack(), + } + default: + err = &traceError{ + msg: fmt.Sprintf("%v", e), + stack: callStack(), + } + } + } + + earlyReturn := scenarioErr != nil || errors.Is(err, ErrUndefined) + + // Check for any calls to Fail on dogT + if err == nil { + if t := getTestingT(ctx); t != nil { + err = t.isFailed() + } + } + + status := StepUndefined + + switch { + case errors.Is(err, ErrAmbiguous): + status = StepAmbiguous + case errors.Is(err, ErrPending): + status = StepPending + case errors.Is(err, ErrSkip), err == nil && scenarioErr != nil: + status = StepSkipped + case errors.Is(err, ErrUndefined): + status = StepUndefined + case err != nil: + status = StepFailed + case err == nil && scenarioErr == nil: + status = StepPassed + } + + // Run after step handlers. + rctx, err = s.runAfterStepHooks(ctx, step, status, err) + + // Trigger after scenario on failing or last step to attach possible hook error to step. 
+ if !s.shouldFail(scenarioErr) && (isLast || s.shouldFail(err)) { + rctx, err = s.runAfterScenarioHooks(rctx, pickle, err) + } + + // extract any accumulated attachments and clear them + pickledAttachments := pickleAttachments(rctx) + rctx = clearAttach(rctx) + + if earlyReturn { + return + } + + switch { + case err == nil: + sr := models.NewStepResult(models.Passed, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Passed(pickle, step, match.GetInternalStepDefinition()) + case errors.Is(err, ErrPending): + sr := models.NewStepResult(models.Pending, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Pending(pickle, step, match.GetInternalStepDefinition()) + case errors.Is(err, ErrSkip): + sr := models.NewStepResult(models.Skipped, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Skipped(pickle, step, match.GetInternalStepDefinition()) + case errors.Is(err, ErrAmbiguous): + sr := models.NewStepResult(models.Ambiguous, pickle.Id, step.Id, match, pickledAttachments, err) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Ambiguous(pickle, step, match.GetInternalStepDefinition(), err) + default: + sr := models.NewStepResult(models.Failed, pickle.Id, step.Id, match, pickledAttachments, err) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Failed(pickle, step, match.GetInternalStepDefinition(), err) + } + }() + + // run before scenario handlers + if isFirst { + ctx, err = s.runBeforeScenarioHooks(ctx, pickle) + } + + // run before step handlers + ctx, err = s.runBeforeStepHooks(ctx, step, err) + + var matchError error + match, matchError = s.matchStep(step) + + s.storage.MustInsertStepDefintionMatch(step.AstNodeIds[0], match) + s.fmt.Defined(pickle, step, match.GetInternalStepDefinition()) + + if err != nil { + pickledAttachments := pickleAttachments(ctx) + ctx = clearAttach(ctx) + + sr := models.NewStepResult(models.Failed, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + return ctx, err + } + + if matchError != nil { + return ctx, matchError + } + + if ctx, undef, err := s.maybeUndefined(ctx, step.Text, step.Argument, step.Type); err != nil { + return ctx, err + } else if len(undef) > 0 { + if match != nil { + match = &models.StepDefinition{ + StepDefinition: formatters.StepDefinition{ + Expr: match.Expr, + Handler: match.Handler, + Keyword: match.Keyword, + }, + Args: match.Args, + HandlerValue: match.HandlerValue, + File: match.File, + Line: match.Line, + Nested: match.Nested, + Undefined: undef, + } + } + + pickledAttachments := pickleAttachments(ctx) + ctx = clearAttach(ctx) + + sr := models.NewStepResult(models.Undefined, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + + s.fmt.Undefined(pickle, step, match.GetInternalStepDefinition()) + return ctx, fmt.Errorf("%w: %s", ErrUndefined, step.Text) + } + + if scenarioErr != nil { + pickledAttachments := pickleAttachments(ctx) + ctx = clearAttach(ctx) + + sr := models.NewStepResult(models.Skipped, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + + s.fmt.Skipped(pickle, step, match.GetInternalStepDefinition()) + return ctx, nil + } + + ctx, err = s.maybeSubSteps(match.Run(ctx)) + + return ctx, err +} + +func (s *suite) runBeforeStepHooks(ctx context.Context, step *Step, err error) (context.Context, error) { + hooksFailed := false + + for _, f 
:= range s.beforeStepHandlers { + hctx, herr := f(ctx, step) + if herr != nil { + hooksFailed = true + + if err == nil { + err = herr + } else { + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + if hooksFailed { + err = fmt.Errorf("before step hook failed: %w", err) + } + + return ctx, err +} + +func (s *suite) runAfterStepHooks(ctx context.Context, step *Step, status StepResultStatus, err error) (context.Context, error) { + for _, f := range s.afterStepHandlers { + hctx, herr := f(ctx, step, status, err) + + // Adding hook error to resulting error without breaking hooks loop. + if herr != nil { + if err == nil { + err = herr + } else { + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + return ctx, err +} + +func (s *suite) runBeforeScenarioHooks(ctx context.Context, pickle *messages.Pickle) (context.Context, error) { + var err error + + // run before scenario handlers + for _, f := range s.beforeScenarioHandlers { + hctx, herr := f(ctx, pickle) + if herr != nil { + if err == nil { + err = herr + } else { + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + if err != nil { + err = fmt.Errorf("before scenario hook failed: %w", err) + } + + return ctx, err +} + +func (s *suite) runAfterScenarioHooks(ctx context.Context, pickle *messages.Pickle, lastStepErr error) (context.Context, error) { + err := lastStepErr + + hooksFailed := false + isStepErr := true + + // run after scenario handlers + for _, f := range s.afterScenarioHandlers { + hctx, herr := f(ctx, pickle, err) + + // Adding hook error to resulting error without breaking hooks loop. + if herr != nil { + hooksFailed = true + + if err == nil { + isStepErr = false + err = herr + } else { + if isStepErr { + err = fmt.Errorf("step error: %w", err) + isStepErr = false + } + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + if hooksFailed { + err = fmt.Errorf("after scenario hook failed: %w", err) + } + + return ctx, err +} + +func (s *suite) maybeUndefined(ctx context.Context, text string, arg interface{}, stepType messages.PickleStepType) (context.Context, []string, error) { + var undefined []string + step, err := s.matchStepTextAndType(text, stepType) + if err != nil { + return ctx, undefined, err + } + + if nil == step { + return ctx, []string{text}, nil + } + + if !step.Nested { + return ctx, undefined, nil + } + + if arg != nil { + step.Args = append(step.Args, arg) + } + + ctx, steps := step.Run(ctx) + + for _, next := range steps.(Steps) { + lines := strings.Split(next, "\n") + // @TODO: we cannot currently parse table or content body from nested steps + if len(lines) > 1 { + return ctx, undefined, fmt.Errorf("nested steps cannot be multiline and have table or content body argument") + } + if len(lines[0]) > 0 && lines[0][len(lines[0])-1] == ':' { + return ctx, undefined, fmt.Errorf("nested steps cannot be multiline and have table or content body argument") + } + ctx, undef, err := s.maybeUndefined(ctx, next, nil, messages.PickleStepType_UNKNOWN) + if err != nil { + return ctx, undefined, err + } + undefined = append(undefined, undef...) 
+ } + return ctx, undefined, nil +} + +func (s *suite) maybeSubSteps(ctx context.Context, result interface{}) (context.Context, error) { + if nil == result { + return ctx, nil + } + + if err, ok := result.(error); ok { + return ctx, err + } + + steps, ok := result.(Steps) + if !ok { + return ctx, fmt.Errorf("unexpected error, should have been godog.Steps: %T - %+v", result, result) + } + + for _, text := range steps { + def, err := s.matchStepTextAndType(text, messages.PickleStepType_UNKNOWN) + if err != nil { + return ctx, err + } + + if def == nil { + return ctx, fmt.Errorf("%w: %s", ErrUndefined, text) + } else { + ctx, err = s.runSubStep(ctx, text, def) + if err != nil { + return ctx, err + } + } + } + return ctx, nil +} + +func (s *suite) runSubStep(ctx context.Context, text string, def *models.StepDefinition) (_ context.Context, err error) { + st := &Step{} + st.Text = text + st.Type = messages.PickleStepType_ACTION + + defer func() { + status := StepPassed + + switch { + case errors.Is(err, ErrUndefined): + status = StepUndefined + case errors.Is(err, ErrPending): + status = StepPending + case err != nil: + status = StepFailed + } + + ctx, err = s.runAfterStepHooks(ctx, st, status, err) + }() + + ctx, err = s.runBeforeStepHooks(ctx, st, nil) + if err != nil { + return ctx, fmt.Errorf("%s: %+v", text, err) + } + + if ctx, err = s.maybeSubSteps(def.Run(ctx)); err != nil { + return ctx, fmt.Errorf("%s: %+v", text, err) + } + + return ctx, nil +} + +func (s *suite) matchStepTextAndType(text string, stepType messages.PickleStepType) (*models.StepDefinition, error) { + var first *models.StepDefinition + matchingExpressions := make([]string, 0) + + for _, h := range s.steps { + if m := h.Expr.FindStringSubmatch(text); len(m) > 0 { + if !keywordMatches(h.Keyword, stepType) { + continue + } + var args []interface{} + for _, m := range m[1:] { + args = append(args, m) + } + + matchingExpressions = append(matchingExpressions, h.Expr.String()) + + // since we need to assign arguments + // better to copy the step definition + match := &models.StepDefinition{ + StepDefinition: formatters.StepDefinition{ + Expr: h.Expr, + Handler: h.Handler, + Keyword: h.Keyword, + }, + Args: args, + HandlerValue: h.HandlerValue, + File: h.File, + Line: h.Line, + Nested: h.Nested, + } + + if first == nil { + first = match + } + } + } + + if s.strict { + if len(matchingExpressions) > 1 { + errs := "\n " + strings.Join(matchingExpressions, "\n ") + return nil, fmt.Errorf("%w, step text: %s\n matches:%s", ErrAmbiguous, text, errs) + } + } + + return first, nil +} + +func keywordMatches(k formatters.Keyword, stepType messages.PickleStepType) bool { + if k == formatters.None { + return true + } + switch stepType { + case messages.PickleStepType_CONTEXT: + return k == formatters.Given + case messages.PickleStepType_ACTION: + return k == formatters.When + case messages.PickleStepType_OUTCOME: + return k == formatters.Then + default: + return true + } +} + +func (s *suite) runSteps(ctx context.Context, pickle *Scenario, steps []*Step) (context.Context, error) { + var ( + stepErr, scenarioErr error + ) + + for i, step := range steps { + isLast := i == len(steps)-1 + isFirst := i == 0 + ctx, stepErr = s.runStep(ctx, pickle, step, scenarioErr, isFirst, isLast) + if scenarioErr == nil || s.shouldFail(stepErr) { + scenarioErr = stepErr + } + } + + return ctx, scenarioErr +} + +func (s *suite) shouldFail(err error) bool { + if err == nil || errors.Is(err, ErrSkip) { + return false + } + + if errors.Is(err, ErrUndefined) || 
errors.Is(err, ErrPending) { + return s.strict + } + + return true +} + +func (s *suite) runPickle(pickle *messages.Pickle) (err error) { + ctx := s.defaultContext + if ctx == nil { + ctx = context.Background() + } + + ctx, cancel := context.WithCancel(ctx) + + defer cancel() + + if len(pickle.Steps) == 0 { + pr := models.PickleResult{PickleID: pickle.Id, StartedAt: utils.TimeNowFunc()} + s.storage.MustInsertPickleResult(pr) + + s.fmt.Pickle(pickle) + return fmt.Errorf("%w: no steps in scenario", ErrUndefined) + } + + // Before scenario hooks are called in context of first evaluated step + // so that error from handler can be added to step. + + pr := models.PickleResult{PickleID: pickle.Id, StartedAt: utils.TimeNowFunc()} + s.storage.MustInsertPickleResult(pr) + + s.fmt.Pickle(pickle) + + dt := &testingT{ + name: pickle.Name, + } + ctx = setContextTestingT(ctx, dt) + // scenario + if s.testingT != nil { + // Running scenario as a subtest. + s.testingT.Run(pickle.Name, func(t *testing.T) { + dt.t = t + ctx, err = s.runSteps(ctx, pickle, pickle.Steps) + if s.shouldFail(err) { + t.Errorf("%+v", err) + } + }) + } else { + ctx, err = s.runSteps(ctx, pickle, pickle.Steps) + } + + // After scenario handlers are called in context of last evaluated step + // so that error from handler can be added to step. + + return err +} diff --git a/vendor/github.com/cucumber/godog/test_context.go b/vendor/github.com/cucumber/godog/test_context.go new file mode 100644 index 000000000..add9f47b0 --- /dev/null +++ b/vendor/github.com/cucumber/godog/test_context.go @@ -0,0 +1,371 @@ +package godog + +import ( + "context" + "fmt" + "reflect" + "regexp" + "runtime" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/builder" + "github.com/cucumber/godog/internal/flags" + "github.com/cucumber/godog/internal/models" +) + +// GherkinDocument represents gherkin document. +type GherkinDocument = messages.GherkinDocument + +// Scenario represents the executed scenario +type Scenario = messages.Pickle + +// Step represents the executed step +type Step = messages.PickleStep + +// Steps allows to nest steps +// instead of returning an error in step func +// it is possible to return combined steps: +// +// func multistep(name string) godog.Steps { +// return godog.Steps{ +// fmt.Sprintf(`an user named "%s"`, name), +// fmt.Sprintf(`user "%s" is authenticated`, name), +// } +// } +// +// These steps will be matched and executed in +// sequential order. The first one which fails +// will result in main step failure. +type Steps []string + +// StepDefinition is a registered step definition +// contains a StepHandler and regexp which +// is used to match a step. Args which +// were matched by last executed step +// +// This structure is passed to the formatter +// when step is matched and is either failed +// or successful +type StepDefinition = formatters.StepDefinition + +// DocString represents the DocString argument made to a step definition +type DocString = messages.PickleDocString + +// Table represents the Table argument made to a step definition +type Table = messages.PickleTable + +// TestSuiteContext allows various contexts +// to register event handlers. +// +// When running a test suite, the instance of TestSuiteContext +// is passed to all functions (contexts), which +// have it as a first and only argument. 
+// +// Note that all event hooks does not catch panic errors +// in order to have a trace information +type TestSuiteContext struct { + beforeSuiteHandlers []func() + afterSuiteHandlers []func() + + suite *suite +} + +// BeforeSuite registers a function or method +// to be run once before suite runner. +// +// Use it to prepare the test suite for a spin. +// Connect and prepare database for instance... +func (ctx *TestSuiteContext) BeforeSuite(fn func()) { + ctx.beforeSuiteHandlers = append(ctx.beforeSuiteHandlers, fn) +} + +// AfterSuite registers a function or method +// to be run once after suite runner +func (ctx *TestSuiteContext) AfterSuite(fn func()) { + ctx.afterSuiteHandlers = append(ctx.afterSuiteHandlers, fn) +} + +// ScenarioContext allows registering scenario hooks. +func (ctx *TestSuiteContext) ScenarioContext() *ScenarioContext { + return &ScenarioContext{ + suite: ctx.suite, + } +} + +// ScenarioContext allows various contexts +// to register steps and event handlers. +// +// When running a scenario, the instance of ScenarioContext +// is passed to all functions (contexts), which +// have it as a first and only argument. +// +// Note that all event hooks does not catch panic errors +// in order to have a trace information. Only step +// executions are catching panic error since it may +// be a context specific error. +type ScenarioContext struct { + suite *suite +} + +// StepContext allows registering step hooks. +type StepContext struct { + suite *suite +} + +// Before registers a function or method +// to be run before every scenario. +// +// It is a good practice to restore the default state +// before every scenario, so it would be isolated from +// any kind of state. +func (ctx ScenarioContext) Before(h BeforeScenarioHook) { + ctx.suite.beforeScenarioHandlers = append(ctx.suite.beforeScenarioHandlers, h) +} + +// BeforeScenarioHook defines a hook before scenario. +type BeforeScenarioHook func(ctx context.Context, sc *Scenario) (context.Context, error) + +// After registers a function or method +// to be run after every scenario. +func (ctx ScenarioContext) After(h AfterScenarioHook) { + ctx.suite.afterScenarioHandlers = append(ctx.suite.afterScenarioHandlers, h) +} + +// AfterScenarioHook defines a hook after scenario. +type AfterScenarioHook func(ctx context.Context, sc *Scenario, err error) (context.Context, error) + +// StepContext exposes StepContext of a scenario. +func (ctx ScenarioContext) StepContext() StepContext { + return StepContext(ctx) +} + +// Before registers a function or method +// to be run before every step. +func (ctx StepContext) Before(h BeforeStepHook) { + ctx.suite.beforeStepHandlers = append(ctx.suite.beforeStepHandlers, h) +} + +// BeforeStepHook defines a hook before step. +type BeforeStepHook func(ctx context.Context, st *Step) (context.Context, error) + +// After registers a function or method +// to be run after every step. +// +// It may be convenient to return a different kind of error +// in order to print more state details which may help +// in case of step failure +// +// In some cases, for example when running a headless +// browser, to take a screenshot after failure. +func (ctx StepContext) After(h AfterStepHook) { + ctx.suite.afterStepHandlers = append(ctx.suite.afterStepHandlers, h) +} + +// AfterStepHook defines a hook after step. 
+type AfterStepHook func(ctx context.Context, st *Step, status StepResultStatus, err error) (context.Context, error) + +// BeforeScenario registers a function or method +// to be run before every scenario. +// +// It is a good practice to restore the default state +// before every scenario, so it would be isolated from +// any kind of state. +// +// Deprecated: use Before. +func (ctx ScenarioContext) BeforeScenario(fn func(sc *Scenario)) { + ctx.Before(func(ctx context.Context, sc *Scenario) (context.Context, error) { + fn(sc) + + return ctx, nil + }) +} + +// AfterScenario registers a function or method +// to be run after every scenario. +// +// Deprecated: use After. +func (ctx ScenarioContext) AfterScenario(fn func(sc *Scenario, err error)) { + ctx.After(func(ctx context.Context, sc *Scenario, err error) (context.Context, error) { + fn(sc, err) + + return ctx, nil + }) +} + +// BeforeStep registers a function or method +// to be run before every step. +// +// Deprecated: use ScenarioContext.StepContext() and StepContext.Before. +func (ctx ScenarioContext) BeforeStep(fn func(st *Step)) { + ctx.StepContext().Before(func(ctx context.Context, st *Step) (context.Context, error) { + fn(st) + + return ctx, nil + }) +} + +// AfterStep registers a function or method +// to be run after every step. +// +// It may be convenient to return a different kind of error +// in order to print more state details which may help +// in case of step failure +// +// In some cases, for example when running a headless +// browser, to take a screenshot after failure. +// +// Deprecated: use ScenarioContext.StepContext() and StepContext.After. +func (ctx ScenarioContext) AfterStep(fn func(st *Step, err error)) { + ctx.StepContext().After(func(ctx context.Context, st *Step, status StepResultStatus, err error) (context.Context, error) { + fn(st, err) + + return ctx, nil + }) +} + +// Step allows to register a *StepDefinition in the +// Godog feature suite, the definition will be applied +// to all steps matching the given Regexp expr. +// +// It will panic if expr is not a valid regular +// expression or stepFunc is not a valid step +// handler. +// +// The expression can be of type: *regexp.Regexp, string or []byte +// +// The stepFunc may accept one or several arguments of type: +// - int, int8, int16, int32, int64 +// - float32, float64 +// - string +// - []byte +// - *godog.DocString +// - *godog.Table +// +// The stepFunc need to return either an error or []string for multistep +// +// Note that if there are two definitions which may match +// the same step, then only the first matched handler +// will be applied. +// +// If none of the *StepDefinition is matched, then +// ErrUndefined error will be returned when +// running steps. +func (ctx ScenarioContext) Step(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.None) +} + +// Given functions identically to Step, but the *StepDefinition +// will only be matched if the step starts with "Given". "And" +// and "But" keywords copy the keyword of the last step for the +// purpose of matching. +func (ctx ScenarioContext) Given(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.Given) +} + +// When functions identically to Step, but the *StepDefinition +// will only be matched if the step starts with "When". "And" +// and "But" keywords copy the keyword of the last step for the +// purpose of matching. 
+func (ctx ScenarioContext) When(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.When) +} + +// Then functions identically to Step, but the *StepDefinition +// will only be matched if the step starts with "Then". "And" +// and "But" keywords copy the keyword of the last step for the +// purpose of matching. +func (ctx ScenarioContext) Then(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.Then) +} + +func (ctx ScenarioContext) stepWithKeyword(expr interface{}, stepFunc interface{}, keyword formatters.Keyword) { + var regex *regexp.Regexp + + // Validate the first input param is regex compatible + switch t := expr.(type) { + case *regexp.Regexp: + regex = t + case string: + regex = regexp.MustCompile(t) + case []byte: + regex = regexp.MustCompile(string(t)) + default: + panic(fmt.Sprintf("expecting expr to be a *regexp.Regexp or a string or []byte, got type: %T", expr)) + } + + // Validate that the handler is a function. + handlerType := reflect.TypeOf(stepFunc) + if handlerType.Kind() != reflect.Func { + panic(fmt.Sprintf("expected handler to be func, but got: %T", stepFunc)) + } + + // FIXME = Validate the handler function param types here so + // that any errors are discovered early. + // StepDefinition.Run defines the supported types but fails at run time not registration time + + // Validate the function's return types. + helpPrefix := "expected handler to return one of error or context.Context or godog.Steps or (context.Context, error)" + isNested := false + + numOut := handlerType.NumOut() + switch numOut { + case 0: + // No return values. + case 1: + // One return value: should be error, Steps, or context.Context. + outType := handlerType.Out(0) + if outType == reflect.TypeOf(Steps{}) { + isNested = true + } else { + if outType != errorInterface && outType != contextInterface { + panic(fmt.Sprintf("%s, but got: %v", helpPrefix, outType)) + } + } + case 2: + // Two return values: should be (context.Context, error). + if handlerType.Out(0) != contextInterface || handlerType.Out(1) != errorInterface { + panic(fmt.Sprintf("%s, but got: %v, %v", helpPrefix, handlerType.Out(0), handlerType.Out(1))) + } + default: + // More than two return values. + panic(fmt.Sprintf("expected handler to return either zero, one or two values, but it has: %d", numOut)) + } + + // Register the handler + def := &models.StepDefinition{ + StepDefinition: formatters.StepDefinition{ + Handler: stepFunc, + Expr: regex, + Keyword: keyword, + }, + HandlerValue: reflect.ValueOf(stepFunc), + Nested: isNested, + } + + // Get the file and line number of the call that created this step with a + // call to one of the Step, Given, When, or Then wrappers. + _, def.File, def.Line, _ = runtime.Caller(2) + + // stash the step + ctx.suite.steps = append(ctx.suite.steps, def) +} + +// Build creates a test package like go test command at given target path. +// If there are no go files in tested directory, then +// it simply builds a godog executable to scan features. +// +// If there are go test files, it first builds a test +// package with standard go test command. +// +// Finally, it generates godog suite executable which +// registers exported godog contexts from the test files +// of tested package. 
+// +// Returns the path to generated executable +func Build(bin string) error { + return builder.Build(bin) +} + +type Feature = flags.Feature diff --git a/vendor/github.com/cucumber/godog/testingt.go b/vendor/github.com/cucumber/godog/testingt.go new file mode 100644 index 000000000..25981b89a --- /dev/null +++ b/vendor/github.com/cucumber/godog/testingt.go @@ -0,0 +1,206 @@ +package godog + +import ( + "context" + "fmt" + "strings" + "testing" +) + +// T returns a TestingT compatible interface from the current test context. It will return nil if +// called outside the context of a test. This can be used with (for example) testify's assert and +// require packages. +func T(ctx context.Context) TestingT { + return getTestingT(ctx) +} + +// TestingT is a subset of the public methods implemented by go's testing.T. It allows assertion +// libraries to be used with godog, provided they depend only on this subset of methods. +type TestingT interface { + // Name returns the name of the current pickle under test + Name() string + // Log will log to the current testing.T log if set, otherwise it will log to stdout + Log(args ...interface{}) + // Logf will log a formatted string to the current testing.T log if set, otherwise it will log + // to stdout + Logf(format string, args ...interface{}) + // Error fails the current test and logs the provided arguments. Equivalent to calling Log then + // Fail. + Error(args ...interface{}) + // Errorf fails the current test and logs the formatted message. Equivalent to calling Logf then + // Fail. + Errorf(format string, args ...interface{}) + // Fail marks the current test as failed, but does not halt execution of the step. + Fail() + // FailNow marks the current test as failed and halts execution of the step. + FailNow() + // Fatal logs the provided arguments, marks the test as failed and halts execution of the step. + Fatal(args ...interface{}) + // Fatal logs the formatted message, marks the test as failed and halts execution of the step. + Fatalf(format string, args ...interface{}) + // Skip logs the provided arguments and marks the test as skipped but does not halt execution + // of the step. + Skip(args ...interface{}) + // Skipf logs the formatted message and marks the test as skipped but does not halt execution + // of the step. + Skipf(format string, args ...interface{}) + // SkipNow marks the current test as skipped and halts execution of the step. + SkipNow() + // Skipped returns true if the test has been marked as skipped. + Skipped() bool +} + +// Logf will log test output. If called in the context of a test and testing.T has been registered, +// this will log using the step's testing.T, else it will simply log to stdout. +func Logf(ctx context.Context, format string, args ...interface{}) { + if t := getTestingT(ctx); t != nil { + t.Logf(format, args...) + return + } + fmt.Printf(format+"\n", args...) +} + +// Log will log test output. If called in the context of a test and testing.T has been registered, +// this will log using the step's testing.T, else it will simply log to stdout. +func Log(ctx context.Context, args ...interface{}) { + if t := getTestingT(ctx); t != nil { + t.Log(args...) + return + } + fmt.Println(args...) 
+} + +// LoggedMessages returns an array of any logged messages that have been recorded during the test +// through calls to godog.Log / godog.Logf or via operations against godog.T(ctx) +func LoggedMessages(ctx context.Context) []string { + if t := getTestingT(ctx); t != nil { + return t.logMessages + } + return nil +} + +// errStopNow should be returned inside a panic within the test to immediately halt execution of that +// test +var errStopNow = fmt.Errorf("FailNow or SkipNow called") + +type testingT struct { + name string + t *testing.T + failed bool + skipped bool + failMessages []string + logMessages []string +} + +// check interface against our testingT and the upstream testing.B/F/T: +var ( + _ TestingT = &testingT{} + _ TestingT = (*testing.T)(nil) +) + +func (dt *testingT) Name() string { + if dt.t != nil { + return dt.t.Name() + } + return dt.name +} + +func (dt *testingT) Log(args ...interface{}) { + dt.logMessages = append(dt.logMessages, fmt.Sprint(args...)) + if dt.t != nil { + dt.t.Log(args...) + return + } + fmt.Println(args...) +} + +func (dt *testingT) Logf(format string, args ...interface{}) { + dt.logMessages = append(dt.logMessages, fmt.Sprintf(format, args...)) + if dt.t != nil { + dt.t.Logf(format, args...) + return + } + fmt.Printf(format+"\n", args...) +} + +func (dt *testingT) Error(args ...interface{}) { + dt.Log(args...) + dt.failMessages = append(dt.failMessages, fmt.Sprintln(args...)) + dt.Fail() +} + +func (dt *testingT) Errorf(format string, args ...interface{}) { + dt.Logf(format, args...) + dt.failMessages = append(dt.failMessages, fmt.Sprintf(format, args...)) + dt.Fail() +} + +func (dt *testingT) Fail() { + dt.failed = true +} + +func (dt *testingT) FailNow() { + dt.Fail() + panic(errStopNow) +} + +func (dt *testingT) Fatal(args ...interface{}) { + dt.Log(args...) + dt.FailNow() +} + +func (dt *testingT) Fatalf(format string, args ...interface{}) { + dt.Logf(format, args...) + dt.FailNow() +} + +func (dt *testingT) Skip(args ...interface{}) { + dt.Log(args...) + dt.skipped = true +} + +func (dt *testingT) Skipf(format string, args ...interface{}) { + dt.Logf(format, args...) 
+ dt.skipped = true +} + +func (dt *testingT) SkipNow() { + dt.skipped = true + panic(errStopNow) +} + +func (dt *testingT) Skipped() bool { + return dt.skipped +} + +// isFailed will return an error representing the calls to Fail made during this test +func (dt *testingT) isFailed() error { + if dt.skipped { + return ErrSkip + } + if !dt.failed { + return nil + } + switch len(dt.failMessages) { + case 0: + return fmt.Errorf("fail called on TestingT") + case 1: + return fmt.Errorf(dt.failMessages[0]) + default: + return fmt.Errorf("checks failed:\n* %s", strings.Join(dt.failMessages, "\n* ")) + } +} + +type testingTCtxVal struct{} + +func setContextTestingT(ctx context.Context, dt *testingT) context.Context { + return context.WithValue(ctx, testingTCtxVal{}, dt) +} + +func getTestingT(ctx context.Context) *testingT { + dt, ok := ctx.Value(testingTCtxVal{}).(*testingT) + if !ok { + return nil + } + return dt +} diff --git a/vendor/github.com/cucumber/messages/go/v21/.gitignore b/vendor/github.com/cucumber/messages/go/v21/.gitignore new file mode 100644 index 000000000..7b0ee7aeb --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/.gitignore @@ -0,0 +1,17 @@ +.built +.compared +.deps +.dist +.dist-compressed +.go-get +.gofmt +.linted +.tested* +acceptance/ +bin/ +dist/ +dist_compressed/ +*.bin +*.iml +# upx dist/cucumber-gherkin-openbsd-386 fails with a core dump +core.*.!usr!bin!upx-ucl diff --git a/vendor/github.com/cucumber/messages/go/v21/Dockerfile b/vendor/github.com/cucumber/messages/go/v21/Dockerfile new file mode 100644 index 000000000..64e1c2795 --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/Dockerfile @@ -0,0 +1,29 @@ +#syntax=docker/dockerfile:1.4 + +# Base image +ARG GO_VERSION=1.19 +FROM golang:${GO_VERSION}-alpine AS golang +WORKDIR /cucumber + + +# Dummy stage for generated code, overriden in main build +FROM scratch AS schema-codegen + + +FROM golang AS with-dependencies + +COPY --link go.mod go.sum . +RUN --mount=type=cache,target=/go/pkg/mod/cache \ + go mod download && go mod verify + + +FROM golang AS tested + +RUN apk add gcc libc-dev + +COPY --link . . +COPY --link --from=with-dependencies /go/pkg /go/pkg +COPY --link --from=schema-codegen /*.go . + +RUN gofmt -w . +RUN go test --v diff --git a/vendor/github.com/cucumber/messages/go/v21/LICENSE b/vendor/github.com/cucumber/messages/go/v21/LICENSE new file mode 100644 index 000000000..725ba9f4a --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Cucumber Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cucumber/messages/go/v21/Makefile b/vendor/github.com/cucumber/messages/go/v21/Makefile new file mode 100644 index 000000000..898214efa --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/Makefile @@ -0,0 +1,20 @@ +schemas = $(shell find ../jsonschema -name "*.json") + +.DEFAULT_GOAL = help + +help: ## Show this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \n\nWhere is one of:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +generate: require messages.go ## Generate go code based on the schemas found in ../jsonschema and using the scripts in ../jsonschema/scripts for the generation + +require: ## Check requirements for the code generation (ruby and go are required) + @ruby --version >/dev/null 2>&1 || (echo "ERROR: ruby is required."; exit 1) + @go version >/dev/null 2>&1 || (echo "ERROR: go is required."; exit 1) + +clean: ## Remove automatically generated files and related artifacts + rm -f messages.go + +messages.go: $(schemas) ../jsonschema/scripts/codegen.rb ../jsonschema/scripts/templates/go.go.erb ../jsonschema/scripts/templates/go.enum.go.erb + ruby ../jsonschema/scripts/codegen.rb Go ../jsonschema go.go.erb > $@ + ruby ../jsonschema/scripts/codegen.rb Go ../jsonschema go.enum.go.erb >> $@ + go fmt messages.go diff --git a/vendor/github.com/cucumber/messages/go/v21/id_generator.go b/vendor/github.com/cucumber/messages/go/v21/id_generator.go new file mode 100644 index 000000000..a721f9789 --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/id_generator.go @@ -0,0 +1,28 @@ +package messages + +import ( + "github.com/gofrs/uuid" + "strconv" +) + +type IdGenerator interface { + newId() func() string +} + +type Incrementing struct { + next int +} + +func (self *Incrementing) NewId() string { + result := strconv.Itoa(self.next) + self.next++ + return result +} + +type UUID struct { + next int +} + +func (i UUID) NewId() string { + return uuid.Must(uuid.NewV4()).String() +} diff --git a/vendor/github.com/cucumber/messages/go/v21/messages.go b/vendor/github.com/cucumber/messages/go/v21/messages.go new file mode 100644 index 000000000..fbb173d4e --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/messages.go @@ -0,0 +1,510 @@ +package messages + +type Attachment struct { + Body string `json:"body"` + ContentEncoding AttachmentContentEncoding `json:"contentEncoding"` + FileName string `json:"fileName,omitempty"` + MediaType string `json:"mediaType"` + Source *Source `json:"source,omitempty"` + TestCaseStartedId string `json:"testCaseStartedId,omitempty"` + TestStepId string `json:"testStepId,omitempty"` + Url string `json:"url,omitempty"` +} + +type Duration struct { + Seconds int64 `json:"seconds"` + Nanos int64 `json:"nanos"` +} + +type Envelope struct { + Attachment *Attachment `json:"attachment,omitempty"` + GherkinDocument *GherkinDocument `json:"gherkinDocument,omitempty"` + Hook *Hook `json:"hook,omitempty"` + Meta *Meta `json:"meta,omitempty"` + ParameterType *ParameterType `json:"parameterType,omitempty"` + ParseError *ParseError `json:"parseError,omitempty"` + Pickle *Pickle `json:"pickle,omitempty"` + Source *Source 
`json:"source,omitempty"` + StepDefinition *StepDefinition `json:"stepDefinition,omitempty"` + TestCase *TestCase `json:"testCase,omitempty"` + TestCaseFinished *TestCaseFinished `json:"testCaseFinished,omitempty"` + TestCaseStarted *TestCaseStarted `json:"testCaseStarted,omitempty"` + TestRunFinished *TestRunFinished `json:"testRunFinished,omitempty"` + TestRunStarted *TestRunStarted `json:"testRunStarted,omitempty"` + TestStepFinished *TestStepFinished `json:"testStepFinished,omitempty"` + TestStepStarted *TestStepStarted `json:"testStepStarted,omitempty"` + UndefinedParameterType *UndefinedParameterType `json:"undefinedParameterType,omitempty"` +} + +type Exception struct { + Type string `json:"type"` + Message string `json:"message,omitempty"` +} + +type GherkinDocument struct { + Uri string `json:"uri,omitempty"` + Feature *Feature `json:"feature,omitempty"` + Comments []*Comment `json:"comments"` +} + +type Background struct { + Location *Location `json:"location"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Steps []*Step `json:"steps"` + Id string `json:"id"` +} + +type Comment struct { + Location *Location `json:"location"` + Text string `json:"text"` +} + +type DataTable struct { + Location *Location `json:"location"` + Rows []*TableRow `json:"rows"` +} + +type DocString struct { + Location *Location `json:"location"` + MediaType string `json:"mediaType,omitempty"` + Content string `json:"content"` + Delimiter string `json:"delimiter"` +} + +type Examples struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + TableHeader *TableRow `json:"tableHeader,omitempty"` + TableBody []*TableRow `json:"tableBody"` + Id string `json:"id"` +} + +type Feature struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Language string `json:"language"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Children []*FeatureChild `json:"children"` +} + +type FeatureChild struct { + Rule *Rule `json:"rule,omitempty"` + Background *Background `json:"background,omitempty"` + Scenario *Scenario `json:"scenario,omitempty"` +} + +type Rule struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Children []*RuleChild `json:"children"` + Id string `json:"id"` +} + +type RuleChild struct { + Background *Background `json:"background,omitempty"` + Scenario *Scenario `json:"scenario,omitempty"` +} + +type Scenario struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Steps []*Step `json:"steps"` + Examples []*Examples `json:"examples"` + Id string `json:"id"` +} + +type Step struct { + Location *Location `json:"location"` + Keyword string `json:"keyword"` + KeywordType StepKeywordType `json:"keywordType,omitempty"` + Text string `json:"text"` + DocString *DocString `json:"docString,omitempty"` + DataTable *DataTable `json:"dataTable,omitempty"` + Id string `json:"id"` +} + +type TableCell struct { + Location *Location `json:"location"` + Value string `json:"value"` +} + +type TableRow struct { + Location *Location `json:"location"` + Cells []*TableCell `json:"cells"` + Id string `json:"id"` +} + +type 
Tag struct { + Location *Location `json:"location"` + Name string `json:"name"` + Id string `json:"id"` +} + +type Hook struct { + Id string `json:"id"` + Name string `json:"name,omitempty"` + SourceReference *SourceReference `json:"sourceReference"` + TagExpression string `json:"tagExpression,omitempty"` +} + +type Location struct { + Line int64 `json:"line"` + Column int64 `json:"column,omitempty"` +} + +type Meta struct { + ProtocolVersion string `json:"protocolVersion"` + Implementation *Product `json:"implementation"` + Runtime *Product `json:"runtime"` + Os *Product `json:"os"` + Cpu *Product `json:"cpu"` + Ci *Ci `json:"ci,omitempty"` +} + +type Ci struct { + Name string `json:"name"` + Url string `json:"url,omitempty"` + BuildNumber string `json:"buildNumber,omitempty"` + Git *Git `json:"git,omitempty"` +} + +type Git struct { + Remote string `json:"remote"` + Revision string `json:"revision"` + Branch string `json:"branch,omitempty"` + Tag string `json:"tag,omitempty"` +} + +type Product struct { + Name string `json:"name"` + Version string `json:"version,omitempty"` +} + +type ParameterType struct { + Name string `json:"name"` + RegularExpressions []string `json:"regularExpressions"` + PreferForRegularExpressionMatch bool `json:"preferForRegularExpressionMatch"` + UseForSnippets bool `json:"useForSnippets"` + Id string `json:"id"` +} + +type ParseError struct { + Source *SourceReference `json:"source"` + Message string `json:"message"` +} + +type Pickle struct { + Id string `json:"id"` + Uri string `json:"uri"` + Name string `json:"name"` + Language string `json:"language"` + Steps []*PickleStep `json:"steps"` + Tags []*PickleTag `json:"tags"` + AstNodeIds []string `json:"astNodeIds"` +} + +type PickleDocString struct { + MediaType string `json:"mediaType,omitempty"` + Content string `json:"content"` +} + +type PickleStep struct { + Argument *PickleStepArgument `json:"argument,omitempty"` + AstNodeIds []string `json:"astNodeIds"` + Id string `json:"id"` + Type PickleStepType `json:"type,omitempty"` + Text string `json:"text"` +} + +type PickleStepArgument struct { + DocString *PickleDocString `json:"docString,omitempty"` + DataTable *PickleTable `json:"dataTable,omitempty"` +} + +type PickleTable struct { + Rows []*PickleTableRow `json:"rows"` +} + +type PickleTableCell struct { + Value string `json:"value"` +} + +type PickleTableRow struct { + Cells []*PickleTableCell `json:"cells"` +} + +type PickleTag struct { + Name string `json:"name"` + AstNodeId string `json:"astNodeId"` +} + +type Source struct { + Uri string `json:"uri"` + Data string `json:"data"` + MediaType SourceMediaType `json:"mediaType"` +} + +type SourceReference struct { + Uri string `json:"uri,omitempty"` + JavaMethod *JavaMethod `json:"javaMethod,omitempty"` + JavaStackTraceElement *JavaStackTraceElement `json:"javaStackTraceElement,omitempty"` + Location *Location `json:"location,omitempty"` +} + +type JavaMethod struct { + ClassName string `json:"className"` + MethodName string `json:"methodName"` + MethodParameterTypes []string `json:"methodParameterTypes"` +} + +type JavaStackTraceElement struct { + ClassName string `json:"className"` + FileName string `json:"fileName"` + MethodName string `json:"methodName"` +} + +type StepDefinition struct { + Id string `json:"id"` + Pattern *StepDefinitionPattern `json:"pattern"` + SourceReference *SourceReference `json:"sourceReference"` +} + +type StepDefinitionPattern struct { + Source string `json:"source"` + Type StepDefinitionPatternType `json:"type"` +} + +type 
TestCase struct { + Id string `json:"id"` + PickleId string `json:"pickleId"` + TestSteps []*TestStep `json:"testSteps"` +} + +type Group struct { + Children []*Group `json:"children"` + Start int64 `json:"start,omitempty"` + Value string `json:"value,omitempty"` +} + +type StepMatchArgument struct { + Group *Group `json:"group"` + ParameterTypeName string `json:"parameterTypeName,omitempty"` +} + +type StepMatchArgumentsList struct { + StepMatchArguments []*StepMatchArgument `json:"stepMatchArguments"` +} + +type TestStep struct { + HookId string `json:"hookId,omitempty"` + Id string `json:"id"` + PickleStepId string `json:"pickleStepId,omitempty"` + StepDefinitionIds []string `json:"stepDefinitionIds,omitempty"` + StepMatchArgumentsLists []*StepMatchArgumentsList `json:"stepMatchArgumentsLists,omitempty"` +} + +type TestCaseFinished struct { + TestCaseStartedId string `json:"testCaseStartedId"` + Timestamp *Timestamp `json:"timestamp"` + WillBeRetried bool `json:"willBeRetried"` +} + +type TestCaseStarted struct { + Attempt int64 `json:"attempt"` + Id string `json:"id"` + TestCaseId string `json:"testCaseId"` + WorkerId string `json:"workerId,omitempty"` + Timestamp *Timestamp `json:"timestamp"` +} + +type TestRunFinished struct { + Message string `json:"message,omitempty"` + Success bool `json:"success"` + Timestamp *Timestamp `json:"timestamp"` + Exception *Exception `json:"exception,omitempty"` +} + +type TestRunStarted struct { + Timestamp *Timestamp `json:"timestamp"` +} + +type TestStepFinished struct { + TestCaseStartedId string `json:"testCaseStartedId"` + TestStepId string `json:"testStepId"` + TestStepResult *TestStepResult `json:"testStepResult"` + Timestamp *Timestamp `json:"timestamp"` +} + +type TestStepResult struct { + Duration *Duration `json:"duration"` + Message string `json:"message,omitempty"` + Status TestStepResultStatus `json:"status"` + Exception *Exception `json:"exception,omitempty"` +} + +type TestStepStarted struct { + TestCaseStartedId string `json:"testCaseStartedId"` + TestStepId string `json:"testStepId"` + Timestamp *Timestamp `json:"timestamp"` +} + +type Timestamp struct { + Seconds int64 `json:"seconds"` + Nanos int64 `json:"nanos"` +} + +type UndefinedParameterType struct { + Expression string `json:"expression"` + Name string `json:"name"` +} + +type AttachmentContentEncoding string + +const ( + AttachmentContentEncoding_IDENTITY AttachmentContentEncoding = "IDENTITY" + AttachmentContentEncoding_BASE64 AttachmentContentEncoding = "BASE64" +) + +func (e AttachmentContentEncoding) String() string { + switch e { + case AttachmentContentEncoding_IDENTITY: + return "IDENTITY" + case AttachmentContentEncoding_BASE64: + return "BASE64" + default: + panic("Bad enum value for AttachmentContentEncoding") + } +} + +type PickleStepType string + +const ( + PickleStepType_UNKNOWN PickleStepType = "Unknown" + PickleStepType_CONTEXT PickleStepType = "Context" + PickleStepType_ACTION PickleStepType = "Action" + PickleStepType_OUTCOME PickleStepType = "Outcome" +) + +func (e PickleStepType) String() string { + switch e { + case PickleStepType_UNKNOWN: + return "Unknown" + case PickleStepType_CONTEXT: + return "Context" + case PickleStepType_ACTION: + return "Action" + case PickleStepType_OUTCOME: + return "Outcome" + default: + panic("Bad enum value for PickleStepType") + } +} + +type SourceMediaType string + +const ( + SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_PLAIN SourceMediaType = "text/x.cucumber.gherkin+plain" + SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_MARKDOWN 
SourceMediaType = "text/x.cucumber.gherkin+markdown" +) + +func (e SourceMediaType) String() string { + switch e { + case SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_PLAIN: + return "text/x.cucumber.gherkin+plain" + case SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_MARKDOWN: + return "text/x.cucumber.gherkin+markdown" + default: + panic("Bad enum value for SourceMediaType") + } +} + +type StepDefinitionPatternType string + +const ( + StepDefinitionPatternType_CUCUMBER_EXPRESSION StepDefinitionPatternType = "CUCUMBER_EXPRESSION" + StepDefinitionPatternType_REGULAR_EXPRESSION StepDefinitionPatternType = "REGULAR_EXPRESSION" +) + +func (e StepDefinitionPatternType) String() string { + switch e { + case StepDefinitionPatternType_CUCUMBER_EXPRESSION: + return "CUCUMBER_EXPRESSION" + case StepDefinitionPatternType_REGULAR_EXPRESSION: + return "REGULAR_EXPRESSION" + default: + panic("Bad enum value for StepDefinitionPatternType") + } +} + +type StepKeywordType string + +const ( + StepKeywordType_UNKNOWN StepKeywordType = "Unknown" + StepKeywordType_CONTEXT StepKeywordType = "Context" + StepKeywordType_ACTION StepKeywordType = "Action" + StepKeywordType_OUTCOME StepKeywordType = "Outcome" + StepKeywordType_CONJUNCTION StepKeywordType = "Conjunction" +) + +func (e StepKeywordType) String() string { + switch e { + case StepKeywordType_UNKNOWN: + return "Unknown" + case StepKeywordType_CONTEXT: + return "Context" + case StepKeywordType_ACTION: + return "Action" + case StepKeywordType_OUTCOME: + return "Outcome" + case StepKeywordType_CONJUNCTION: + return "Conjunction" + default: + panic("Bad enum value for StepKeywordType") + } +} + +type TestStepResultStatus string + +const ( + TestStepResultStatus_UNKNOWN TestStepResultStatus = "UNKNOWN" + TestStepResultStatus_PASSED TestStepResultStatus = "PASSED" + TestStepResultStatus_SKIPPED TestStepResultStatus = "SKIPPED" + TestStepResultStatus_PENDING TestStepResultStatus = "PENDING" + TestStepResultStatus_UNDEFINED TestStepResultStatus = "UNDEFINED" + TestStepResultStatus_AMBIGUOUS TestStepResultStatus = "AMBIGUOUS" + TestStepResultStatus_FAILED TestStepResultStatus = "FAILED" +) + +func (e TestStepResultStatus) String() string { + switch e { + case TestStepResultStatus_UNKNOWN: + return "UNKNOWN" + case TestStepResultStatus_PASSED: + return "PASSED" + case TestStepResultStatus_SKIPPED: + return "SKIPPED" + case TestStepResultStatus_PENDING: + return "PENDING" + case TestStepResultStatus_UNDEFINED: + return "UNDEFINED" + case TestStepResultStatus_AMBIGUOUS: + return "AMBIGUOUS" + case TestStepResultStatus_FAILED: + return "FAILED" + default: + panic("Bad enum value for TestStepResultStatus") + } +} diff --git a/vendor/github.com/cucumber/messages/go/v21/time_conversion.go b/vendor/github.com/cucumber/messages/go/v21/time_conversion.go new file mode 100644 index 000000000..3a387931e --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/time_conversion.go @@ -0,0 +1,34 @@ +package messages + +import "time" + +const nanosPerSecond = 1000000000 + +func DurationToGoDuration(duration Duration) time.Duration { + secondNanos := duration.Seconds * nanosPerSecond + return time.Duration(secondNanos + int64(duration.Nanos)) +} + +func GoDurationToDuration(goDuration time.Duration) Duration { + seconds := int64(goDuration / nanosPerSecond) + nanos := int64(goDuration % nanosPerSecond) + return Duration{ + Seconds: seconds, + Nanos: nanos, + } +} + +func TimestampToGoTime(timestamp Timestamp) time.Time { + return time.Unix(timestamp.Seconds, timestamp.Nanos) +} + +func 
GoTimeToTimestamp(t time.Time) Timestamp { + unixNanos := t.UnixNano() + seconds := unixNanos / nanosPerSecond + nanos := unixNanos % nanosPerSecond + + return Timestamp{ + Seconds: seconds, + Nanos: nanos, + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index 734cf61e3..6d016d05c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -6,49 +6,39 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ## -## [0.6.0] - 2025-11-03 ## +## [0.6.1] - 2025-11-19 ## -> By the Power of Greyskull! +> At last up jumped the cunning spider, and fiercely held her fast. + +### Fixed ### +- Our logic for deciding whether to use `openat2(2)` or fallback to an `O_PATH` + resolver would cache the result to avoid doing needless test runs of + `openat2(2)`. However, this causes issues when `pathrs-lite` is being used by + a program that applies new seccomp-bpf filters onto itself -- if the filter + denies `openat2(2)` then we would return that error rather than falling back + to the `O_PATH` resolver. To resolve this issue, we no longer cache the + result if `openat2(2)` was successful, only if there was an error. +- A file descriptor leak in our `openat2` wrapper (when doing the necessary + `dup` for `RESOLVE_IN_ROOT`) has been removed. -While quite small code-wise, this release marks a very key point in the -development of filepath-securejoin. - -filepath-securejoin was originally intended (back in 2017) to simply be a -single-purpose library that would take some common code used in container -runtimes (specifically, Docker's `FollowSymlinksInScope`) and make it more -general-purpose (with the eventual goals of it ending up in the Go stdlib). - -Of course, I quickly discovered that this problem was actually far more -complicated to solve when dealing with racing attackers, which lead to me -developing `openat2(2)` and [libpathrs][]. I had originally planned for -libpathrs to completely replace filepath-securejoin "once it was ready" but in -the interim we needed to fix several race attacks in runc as part of security -advisories. Obviously we couldn't require the usage of a pre-0.1 Rust library -in runc so it was necessary to port bits of libpathrs into filepath-securejoin. -(Ironically the first prototypes of libpathrs were originally written in Go and -then rewritten to Rust, so the code in filepath-securejoin is actually Go code -that was rewritten to Rust then re-rewritten to Go.) - -It then became clear that pure-Go libraries will likely not be willing to -require CGo for all of their builds, so it was necessary to accept that -filepath-securejoin will need to stay. As such, in v0.5.0 we provided more -pure-Go implementations of features from libpathrs but moved them into -`pathrs-lite` subpackage to clarify what purpose these helpers serve. - -This release finally closes the loop and makes it so that pathrs-lite can -transparently use libpathrs (via a `libpathrs` build-tag). This means that -upstream libraries can use the pure Go version if they prefer, but downstreams -(either downstream library users or even downstream distributions) are able to -migrate to libpathrs for all usages of pathrs-lite in an entire Go binary. - -I should make it clear that I do not plan to port the rest of libpathrs to Go, -as I do not wish to maintain two copies of the same codebase. 
pathrs-lite -already provides the core essentials necessary to operate on paths safely for -most modern systems. Users who want additional hardening or more ergonomic APIs -are free to use [`cyphar.com/go-pathrs`][go-pathrs] (libpathrs's Go bindings). +## [0.5.2] - 2025-11-19 ## -[libpathrs]: https://github.com/cyphar/libpathrs -[go-pathrs]: https://cyphar.com/go-pathrs +> "Will you walk into my parlour?" said a spider to a fly. + +### Fixed ### +- Our logic for deciding whether to use `openat2(2)` or fallback to an `O_PATH` + resolver would cache the result to avoid doing needless test runs of + `openat2(2)`. However, this causes issues when `pathrs-lite` is being used by + a program that applies new seccomp-bpf filters onto itself -- if the filter + denies `openat2(2)` then we would return that error rather than falling back + to the `O_PATH` resolver. To resolve this issue, we no longer cache the + result if `openat2(2)` was successful, only if there was an error. +- A file descriptor leak in our `openat2` wrapper (when doing the necessary + `dup` for `RESOLVE_IN_ROOT`) has been removed. + +## [0.6.0] - 2025-11-03 ## + +> By the Power of Greyskull! ### Breaking ### - The deprecated `MkdirAll`, `MkdirAllHandle`, `OpenInRoot`, `OpenatInRoot` and @@ -56,12 +46,12 @@ are free to use [`cyphar.com/go-pathrs`][go-pathrs] (libpathrs's Go bindings). directly. ### Added ### -- `pathrs-lite` now has support for using [libpathrs][libpathrs] as a backend. - This is opt-in and can be enabled at build time with the `libpathrs` build - tag. The intention is to allow for downstream libraries and other projects to - make use of the pure-Go `github.com/cyphar/filepath-securejoin/pathrs-lite` - package and distributors can then opt-in to using `libpathrs` for the entire - binary if they wish. +- `pathrs-lite` now has support for using libpathrs as a backend. This is + opt-in and can be enabled at build time with the `libpathrs` build tag. The + intention is to allow for downstream libraries and other projects to make use + of the pure-Go `github.com/cyphar/filepath-securejoin/pathrs-lite` package + and distributors can then opt-in to using `libpathrs` for the entire binary + if they wish. ## [0.5.1] - 2025-10-31 ## @@ -440,8 +430,10 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). 
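The fallback fix described in the 0.6.1 and 0.5.2 entries boils down to a probe whose negative result is sticky but whose positive result is re-checked on every call. Below is a minimal sketch of that pattern, assuming `golang.org/x/sys/unix` on Linux; the name `hasOpenat2` and the probe details are illustrative, not the actual pathrs-lite internals.

```go
package main

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/sys/unix"
)

// openat2Unsupported is only ever set on a failed probe. A successful
// probe is deliberately not cached, so a seccomp-bpf filter installed
// later (which makes openat2(2) start failing) pushes callers onto the
// O_PATH fallback instead of surfacing the filter's error.
var openat2Unsupported atomic.Bool

// hasOpenat2 is a hypothetical probe in the spirit of the changelog entry.
func hasOpenat2() bool {
	if openat2Unsupported.Load() {
		return false
	}
	fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
		Flags:   unix.O_PATH | unix.O_CLOEXEC,
		Resolve: unix.RESOLVE_IN_ROOT,
	})
	if err != nil {
		openat2Unsupported.Store(true) // cache only the failure
		return false
	}
	_ = unix.Close(fd) // always close the probe fd (cf. the fd-leak fix above)
	return true
}

func main() {
	fmt.Println("openat2 usable:", hasOpenat2())
}
```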
-[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.0...HEAD -[0.6.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...v0.6.0 +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.1...HEAD +[0.6.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.0...v0.6.1 +[0.6.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.6.0 +[0.5.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...v0.5.2 [0.5.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.5.1 [0.5.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...v0.5.0 [0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index a918a2aa1..ee6cdce3c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.6.0 +0.6.1 diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml index 568ce16d7..7cea1af8b 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -16,7 +16,7 @@ linters: - godox - gosmopolitan - inamedparam - - intrange # disabled while < go1.22 + #- intrange # disabled while < go1.22 - ireturn - lll - musttag diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 0108f1d57..45bd31b14 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -13,7 +13,14 @@ Completed YES Tested YES ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +<https://datatracker.ietf.org/doc/html/draft-ietf-appsawg-json-pointer-07> + +also known as [RFC6901](https://www.rfc-editor.org/rfc/rfc6901) ### Note + The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. + +That is because our implementation of the JSON pointer only supports explicit references to array elements: the provision in the spec to resolve non-existent members as "the last element in the array", using the special trailing character "-", is not supported.
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index afc8a7840..7513c4763 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -41,8 +41,10 @@ const ( pointerSeparator = `/` ) -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() -var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() +var ( + jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() + jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() +) // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process @@ -56,18 +58,17 @@ type JSONSetable interface { JSONSet(string, any) error } -// Pointer the json pointer reprsentation +// Pointer is a representation of a json pointer type Pointer struct { referenceTokens []string } // New creates a new json pointer for the given string func New(jsonPointerString string) (Pointer, error) { - var p Pointer err := p.parse(jsonPointerString) - return p, err + return p, err } // Get uses the pointer to retrieve a value from a JSON document @@ -80,7 +81,7 @@ func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, jsonname.DefaultJSONNameProvider) } -// DecodedTokens returns the decoded tokens +// DecodedTokens returns the decoded tokens of this JSON pointer func (p *Pointer) DecodedTokens() []string { result := make([]string, 0, len(p.referenceTokens)) for _, t := range p.referenceTokens { @@ -102,9 +103,7 @@ func (p *Pointer) String() string { return emptyPointer } - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString + return pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) } func (p *Pointer) Offset(document string) (int64, error) { @@ -185,7 +184,7 @@ func (p *Pointer) get(node any, nameProvider *jsonname.NameProvider) (any, refle func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error { knd := reflect.ValueOf(node).Kind() - if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + if knd != reflect.Pointer && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { return errors.Join( ErrUnsupportedValueType, ErrPointer, @@ -225,7 +224,7 @@ func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error return err } fld := reflect.ValueOf(r) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Pointer { node = fld.Addr().Interface() continue } @@ -240,7 +239,7 @@ func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } fld := rValue.FieldByName(nm) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Pointer { node = fld.Addr().Interface() continue } @@ -253,7 +252,7 @@ func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error if 
!mv.IsValid() { return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) } - if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Pointer { node = mv.Addr().Interface() continue } @@ -270,7 +269,7 @@ func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error } elem := rValue.Index(tokenIndex) - if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Pointer { node = elem.Addr().Interface() continue } @@ -291,7 +290,7 @@ func isNil(input any) bool { kind := reflect.TypeOf(input).Kind() switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + case reflect.Pointer, reflect.Map, reflect.Slice, reflect.Chan: return reflect.ValueOf(input).IsNil() default: return false @@ -520,16 +519,17 @@ const ( decRefTok1 = `/` ) +var ( + encRefTokReplacer = strings.NewReplacer(encRefTok1, decRefTok1, encRefTok0, decRefTok0) + decRefTokReplacer = strings.NewReplacer(decRefTok1, encRefTok1, decRefTok0, encRefTok0) +) + // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) - step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) - return step2 + return encRefTokReplacer.Replace(token) } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) - step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) - return step2 + return decRefTokReplacer.Replace(token) } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 568ce16d7..7cea1af8b 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -16,7 +16,7 @@ linters: - godox - gosmopolitan - inamedparam - - intrange # disabled while < go1.22 + #- intrange # disabled while < go1.22 - ireturn - lll - musttag diff --git a/vendor/github.com/gofrs/uuid/.gitignore b/vendor/github.com/gofrs/uuid/.gitignore new file mode 100644 index 000000000..666dbbb5b --- /dev/null +++ b/vendor/github.com/gofrs/uuid/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# binary bundle generated by go-fuzz +uuid-fuzz.zip diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE new file mode 100644 index 000000000..926d54987 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2018 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to 
+permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md new file mode 100644 index 000000000..f5db14f07 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/README.md @@ -0,0 +1,117 @@ +# UUID + +[![License](https://img.shields.io/github/license/gofrs/uuid.svg)](https://github.com/gofrs/uuid/blob/master/LICENSE) +[![Build Status](https://travis-ci.org/gofrs/uuid.svg?branch=master)](https://travis-ci.org/gofrs/uuid) +[![GoDoc](http://godoc.org/github.com/gofrs/uuid?status.svg)](http://godoc.org/github.com/gofrs/uuid) +[![Coverage Status](https://codecov.io/gh/gofrs/uuid/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/gofrs/uuid/) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/uuid)](https://goreportcard.com/report/github.com/gofrs/uuid) + +Package uuid provides a pure Go implementation of Universally Unique Identifiers +(UUID) variant as defined in RFC-4122. This package supports both the creation +and parsing of UUIDs in different formats. + +This package supports the following UUID versions: +* Version 1, based on timestamp and MAC address (RFC-4122) +* Version 3, based on MD5 hashing of a named value (RFC-4122) +* Version 4, based on random numbers (RFC-4122) +* Version 5, based on SHA-1 hashing of a named value (RFC-4122) + +This package also supports experimental Universally Unique Identifier implementations based on a +[draft RFC](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) that updates RFC-4122 +* Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122) +* Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122) + +The v6 and v7 IDs are **not** considered a part of the stable API, and may be subject to behavior or API changes as part of minor releases +to this package. They will be updated as the draft RFC changes, and will become stable if and when the draft RFC is accepted. + +## Project History + +This project was originally forked from the +[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after +it appeared to be no longer maintained, while exhibiting [critical +flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take +over this project to ensure it receives regular maintenance for the benefit of +the larger Go community. + +We'd like to thank Maxim Bublis for his hard work on the original iteration of +the package. + +## License + +This source code of this package is released under the MIT License. Please see +the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full +content of the license. 
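The split between random and name-based versions listed above can be seen with the generators this package defines later in the diff (`NewV4` returns `(UUID, error)`, while `NewV3`/`NewV5` return a `UUID` directly in this version); a short sketch:

```go
package main

import (
	"fmt"

	"github.com/gofrs/uuid"
)

func main() {
	// Version 4: random, so two calls almost surely differ.
	a := uuid.Must(uuid.NewV4())
	b := uuid.Must(uuid.NewV4())
	fmt.Println(a == b) // false (with overwhelming probability)

	// Version 5: SHA-1 hash of namespace UUID + name, so the same
	// inputs always produce the same UUID.
	x := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	y := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	fmt.Println(x == y) // true
}
```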
+ +## Recommended Package Version + +We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were +created before our fork of the original package and have some known +deficiencies. + +## Installation + +It is recommended to use a package manager like `dep` that understands tagged +releases of a package, as well as semantic versioning. + +If you are unable to make use of a dependency manager with your project, you can +use the `go get` command to download it directly: + +```Shell +$ go get github.com/gofrs/uuid +``` + +## Requirements + +Due to subtests not being supported in older versions of Go, this package is +only regularly tested against Go 1.7+. This package may work perfectly fine with +Go 1.2+, but support for these older versions is not actively maintained. + +## Go 1.11 Modules + +As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path. + +An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0. + +Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details. + +## Usage + +Here is a quick overview of how to use this package. For more detailed +documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid). + +```go +package main + +import ( + "log" + + "github.com/gofrs/uuid" +) + +// Create a Version 4 UUID, panicking on error. +// Use this form to initialize package-level variables. +var u1 = uuid.Must(uuid.NewV4()) + +func main() { + // Create a Version 4 UUID. + u2, err := uuid.NewV4() + if err != nil { + log.Fatalf("failed to generate UUID: %v", err) + } + log.Printf("generated Version 4 UUID %v", u2) + + // Parse a UUID from a string. 
+ s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + u3, err := uuid.FromString(s) + if err != nil { + log.Fatalf("failed to parse UUID %q: %v", s, err) + } + log.Printf("successfully parsed UUID %v", u3) +} +``` + +## References + +* [RFC-4122](https://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) +* [New UUID Formats RFC Draft (Peabody) Rev 03](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go new file mode 100644 index 000000000..e3014c68c --- /dev/null +++ b/vendor/github.com/gofrs/uuid/codec.go @@ -0,0 +1,212 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns a UUID generated from the raw byte slice input. +// It will return an error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (UUID, error) { + u := UUID{} + err := u.UnmarshalBinary(input) + return u, err +} + +// FromBytesOrNil returns a UUID generated from the raw byte slice input. +// Same behavior as FromBytes(), but returns uuid.Nil instead of an error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns a UUID parsed from the input string. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (UUID, error) { + u := UUID{} + err := u.UnmarshalText([]byte(input)) + return u, err +} + +// FromStringOrNil returns a UUID parsed from the input string. +// Same behavior as FromString(), but returns uuid.Nil instead of an error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by the String() method. +func (u UUID) MarshalText() ([]byte, error) { + return []byte(u.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Following formats are supported: +// +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// +// ABNF for supported UUID text representation follows: +// +// URN := 'urn' +// UUID-NID := 'uuid' +// +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct +// +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn +// +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain +// +func (u *UUID) UnmarshalText(text []byte) error { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 34, 38: + return u.decodeBraced(text) + case 36: + return u.decodeCanonical(text) + case 41, 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) + } +} + +// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". +func (u *UUID) decodeCanonical(t []byte) error { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + src := t + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return err + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return nil +} + +// decodeHashLike decodes UUID strings that are using the following format: +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) error { + src := t[:] + dst := u[:] + + _, err := hex.Decode(dst, src) + return err +} + +// decodeBraced decodes UUID strings that are using the following formats: +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" +// "{6ba7b8109dad11d180b400c04fd430c8}". +func (u *UUID) decodeBraced(t []byte) error { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID strings that are using the following formats: +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeURN(t []byte) error { + total := len(t) + + urnUUIDPrefix := t[:9] + + if !bytes.Equal(urnUUIDPrefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID strings that are using the following formats: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodePlain(t []byte) error { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. 
+func (u UUID) MarshalBinary() ([]byte, error) { + return u.Bytes(), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return an error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) != Size { + return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + } + copy(u[:], data) + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go new file mode 100644 index 000000000..afaefbc8e --- /dev/null +++ b/vendor/github.com/gofrs/uuid/fuzz.go @@ -0,0 +1,47 @@ +// Copyright (c) 2018 Andrei Tudor Călin +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// +build gofuzz + +package uuid + +// Fuzz implements a simple fuzz test for FromString / UnmarshalText. +// +// To run: +// +// $ go get github.com/dvyukov/go-fuzz/... +// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// +// If you make significant changes to FromString / UnmarshalText and add +// new cases to fromStringTests (in codec_test.go), please run +// +// $ go test -seed_fuzz_corpus +// +// to seed the corpus with the new interesting inputs, then run the fuzzer. +func Fuzz(data []byte) int { + _, err := FromString(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go new file mode 100644 index 000000000..4550bc6b3 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -0,0 +1,356 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "fmt" + "hash" + "io" + "net" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +type epochFunc func() time.Time + +// HWAddrFunc is the function type used to provide hardware (MAC) addresses. +type HWAddrFunc func() (net.HardwareAddr, error) + +// DefaultGenerator is the default UUID Generator used by this package. +var DefaultGenerator Generator = NewGen() + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func NewV1() (UUID, error) { + return DefaultGenerator.NewV1() +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return DefaultGenerator.NewV3(ns, name) +} + +// NewV4 returns a randomly generated UUID. +func NewV4() (UUID, error) { + return DefaultGenerator.NewV4() +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return DefaultGenerator.NewV5(ns, name) +} + +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV6() (UUID, error) { + return DefaultGenerator.NewV6() +} + +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func NewV7() (UUID, error) { + return DefaultGenerator.NewV7() +} + +// Generator provides an interface for generating UUIDs. +type Generator interface { + NewV1() (UUID, error) + NewV3(ns UUID, name string) UUID + NewV4() (UUID, error) + NewV5(ns UUID, name string) UUID + NewV6() (UUID, error) + NewV7() (UUID, error) +} + +// Gen is a reference UUID generator based on the specifications laid out in +// RFC-4122 and DCE 1.1: Authentication and Security Services. This type +// satisfies the Generator interface as defined in this package. +// +// For consumers who are generating V1 UUIDs, but don't want to expose the MAC +// address of the node generating the UUIDs, the NewGenWithHWAF() function has been +// provided as a convenience. See the function's documentation for more info. 
+// +// The authors of this package do not feel that the majority of users will need +// to obfuscate their MAC address, and so we recommend using NewGen() to create +// a new generator. +type Gen struct { + clockSequenceOnce sync.Once + hardwareAddrOnce sync.Once + storageMutex sync.Mutex + + rand io.Reader + + epochFunc epochFunc + hwAddrFunc HWAddrFunc + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +// interface check -- build will fail if *Gen doesn't satisfy Generator +var _ Generator = (*Gen)(nil) + +// NewGen returns a new instance of Gen with some default values set. Most +// people should use this. +func NewGen() *Gen { + return NewGenWithHWAF(defaultHWAddrFunc) +} + +// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most +// consumers should use NewGen() instead. +// +// This is used so that consumers can generate their own MAC addresses, for use +// in the generated UUIDs, if there is some concern about exposing the physical +// address of the machine generating the UUID. +// +// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC +// address for all the future UUIDs generated by it. If you'd like to switch the +// MAC address being used, you'll need to create a new generator using this +// function. +func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { + return &Gen{ + epochFunc: time.Now, + hwAddrFunc: hwaf, + rand: rand.Reader, + } +} + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func (g *Gen) NewV1() (UUID, error) { + u := UUID{} + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + hardwareAddr, err := g.getHardwareAddr() + if err != nil { + return Nil, err + } + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func (g *Gen) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns a randomly generated UUID. +func (g *Gen) NewV4() (UUID, error) { + u := UUID{} + if _, err := io.ReadFull(g.rand, u[:]); err != nil { + return Nil, err + } + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func (g *Gen) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. 
+func (g *Gen) NewV6() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[10:]); err != nil { + return Nil, err + } + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid + binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits) + binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits) + + u.SetVersion(V6) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// getClockSequence returns the epoch and clock sequence for V1 and V6 UUIDs. +func (g *Gen) getClockSequence() (uint64, uint16, error) { + var err error + g.clockSequenceOnce.Do(func() { + buf := make([]byte, 2) + if _, err = io.ReadFull(g.rand, buf); err != nil { + return + } + g.clockSequence = binary.BigEndian.Uint16(buf) + }) + if err != nil { + return 0, 0, err + } + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := g.getEpoch() + // Clock didn't change since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, nil +} + +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func (g *Gen) NewV7() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[6:]); err != nil { + return Nil, err + } + + tn := g.epochFunc() + ms := uint64(tn.Unix())*1e3 + uint64(tn.Nanosecond())/1e6 + u[0] = byte(ms >> 40) + u[1] = byte(ms >> 32) + u[2] = byte(ms >> 24) + u[3] = byte(ms >> 16) + u[4] = byte(ms >> 8) + u[5] = byte(ms) + + u.SetVersion(V7) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// Returns the hardware address. +func (g *Gen) getHardwareAddr() ([]byte, error) { + var err error + g.hardwareAddrOnce.Do(func() { + var hwAddr net.HardwareAddr + if hwAddr, err = g.hwAddrFunc(); err == nil { + copy(g.hardwareAddr[:], hwAddr) + return + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence. + if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil { + return + } + // Set multicast bit as recommended by RFC-4122 + g.hardwareAddr[0] |= 0x01 + }) + if err != nil { + return []byte{}, err + } + return g.hardwareAddr[:], nil +} + +// Returns the difference between UUID epoch (October 15, 1582) +// and current time in 100-nanosecond intervals. +func (g *Gen) getEpoch() uint64 { + return epochStart + uint64(g.epochFunc().UnixNano()/100) +} + +// Returns the UUID based on the hashing of the namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +var netInterfaces = net.Interfaces + +// Returns the hardware address. 
+func defaultHWAddrFunc() (net.HardwareAddr, error) { + ifaces, err := netInterfaces() + if err != nil { + return []byte{}, err + } + for _, iface := range ifaces { + if len(iface.HardwareAddr) >= 6 { + return iface.HardwareAddr, nil + } + } + return []byte{}, fmt.Errorf("uuid: no HW address found") +} diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go new file mode 100644 index 000000000..6f254a4fd --- /dev/null +++ b/vendor/github.com/gofrs/uuid/sql.go @@ -0,0 +1,109 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice will be handled by UnmarshalBinary, while +// a longer byte slice or a string will be handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case UUID: // support gorm convert from UUID to NullUUID + *u = src + return nil + + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database. +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. 
+func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} + +// MarshalJSON marshals the NullUUID as null or the nested UUID +func (u NullUUID) MarshalJSON() ([]byte, error) { + if !u.Valid { + return json.Marshal(nil) + } + + return json.Marshal(u.UUID) +} + +// UnmarshalJSON unmarshals a NullUUID +func (u *NullUUID) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, []byte("null")) { + u.UUID, u.Valid = Nil, false + return nil + } + + if err := json.Unmarshal(b, &u.UUID); err != nil { + return err + } + + u.Valid = true + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go new file mode 100644 index 000000000..e747e5412 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -0,0 +1,292 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementations of the Universally Unique Identifier +// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 03). +// +// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The +// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable +// UUIDs, versions 6 and 7. +// +// DCE 1.1[3] provides the specification for version 2, but version 2 support +// was removed from this package in v4 due to some concerns with the +// specification itself. Reading the spec, it seems that it would result in +// generating UUIDs that aren't very unique. In having read the spec it seemed +// that our implementation did not meet the spec. It also seems to be at-odds +// with RFC 4122, meaning we would need quite a bit of special code to support +// it. Lastly, there were no Version 2 implementations that we could find to +// ensure we were understanding the specification correctly. +// +// [1] https://tools.ietf.org/html/rfc4122 +// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03 +// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 +package uuid + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strings" + "time" +) + +// Size of a UUID in bytes. +const Size = 16 + +// UUID is an array type to represent the value of a UUID, as defined in RFC-4122. +type UUID [Size]byte + +// UUID versions. 
+const ( + _ byte = iota + V1 // Version 1 (date-time and MAC address) + _ // Version 2 (date-time and MAC address, DCE security version) [removed] + V3 // Version 3 (namespace name-based) + V4 // Version 4 (random) + V5 // Version 5 (namespace name-based) + V6 // Version 6 (k-sortable timestamp and random data, field-compatible with v1) [peabody draft] + V7 // Version 7 (k-sortable timestamp and random data) [peabody draft] + _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented] +) + +// UUID layout variants. +const ( + VariantNCS byte = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00, +// 15 October 1582 within a V1 UUID. This type has no meaning for other +// UUID versions since they don't have an embedded timestamp. +type Timestamp uint64 + +const _100nsPerSecond = 10000000 + +// Time returns the UTC time.Time representation of a Timestamp +func (t Timestamp) Time() (time.Time, error) { + secs := uint64(t) / _100nsPerSecond + nsecs := 100 * (uint64(t) % _100nsPerSecond) + + return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil +} + +// TimestampFromV1 returns the Timestamp embedded within a V1 UUID. +// Returns an error if the UUID is any version other than 1. +func TimestampFromV1(u UUID) (Timestamp, error) { + if u.Version() != 1 { + err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) + return 0, err + } + + low := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil +} + +// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This +// function returns an error if the UUID is any version other than 6. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as a minor version +// releases until the spec is final. +func TimestampFromV6(u UUID) (Timestamp, error) { + if u.Version() != 6 { + return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version()) + } + + hi := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + low := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil +} + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to +// zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) +) + +// IsNil returns if the UUID is equal to the nil UUID +func (u UUID) IsNil() bool { + return u == Nil +} + +// Version returns the algorithm version used to generate the UUID. +func (u UUID) Version() byte { + return u[6] >> 4 +} + +// Variant returns the UUID layout variant. 
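+// The variant is encoded in the most significant bits of octet 8; RFC-4122
+// UUIDs have the top two bits set to 1 and 0 respectively (see SetVariant
+// below).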
+func (u UUID) Variant() byte { + switch { + case (u[8] >> 7) == 0x00: + return VariantNCS + case (u[8] >> 6) == 0x02: + return VariantRFC4122 + case (u[8] >> 5) == 0x06: + return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture + } +} + +// Bytes returns a byte slice representation of the UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// String returns a canonical RFC-4122 string representation of the UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// Format implements fmt.Formatter for UUID values. +// +// The behavior is as follows: +// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'. +// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation. +// The 'S' verb returns the RFC-4122 format, but with capital hex digits. +// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer. +// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return +// "%!verb(uuid.UUID=value)" as recommended by the fmt package. +func (u UUID) Format(f fmt.State, c rune) { + switch c { + case 'x', 'X': + s := hex.EncodeToString(u.Bytes()) + if c == 'X' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'v': + var s string + if f.Flag('#') { + s = fmt.Sprintf("%#v", [Size]byte(u)) + } else { + s = u.String() + } + _, _ = io.WriteString(f, s) + case 's', 'S': + s := u.String() + if c == 'S' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'q': + _, _ = io.WriteString(f, `"`+u.String()+`"`) + default: + // invalid/unsupported format verb + fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) + } +} + +func toCapitalHexDigits(ch rune) rune { + // convert a-f hex digits to A-F + switch ch { + case 'a': + return 'A' + case 'b': + return 'B' + case 'c': + return 'C' + case 'd': + return 'D' + case 'e': + return 'E' + case 'f': + return 'F' + default: + return ch + } +} + +// SetVersion sets the version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets the variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. 
It is intended for use in variable
+// initializations such as
+//	var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
+func Must(u UUID, err error) UUID {
+	if err != nil {
+		panic(err)
+	}
+	return u
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
new file mode 100644
index 000000000..86c6d03fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
@@ -0,0 +1,23 @@
+# UNRELEASED
+
+# 1.3.0 (September 17th, 2020)
+
+FEATURES
+
+* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
+
+# 1.2.0 (March 18th, 2020)
+
+FEATURES
+
+* Adds a `Clone` method to `Txn`, allowing a transaction to be split into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
+
+# 1.1.0 (May 22nd, 2019)
+
+FEATURES
+
+* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
+
+# 1.0.0 (August 30th, 2018)
+
+* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
new file mode 100644
index 000000000..e87a115e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a.
any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 000000000..aca15a642 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,66 @@ +go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + +Here is an example of performing a range scan of the keys. 
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+	if string(key) >= "050" {
+		break
+	}
+	fmt.Println(string(key))
+}
+// Output:
+// 005
+// 010
+```
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 000000000..a63674775
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 000000000..168bda76d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,676 @@
+package iradix
+
+import (
+	"bytes"
+	"strings"
+
+	"github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+	// defaultModifiedCache is the default size of the modified node
+	// cache used per transaction. This is used to cache the updates
+	// to the nodes near the root, while the leaves do not need to be
+	// cached. This is important for very large transactions to prevent
+	// the modified cache from growing to be enormous. This is also used
+	// to set the max size of the mutation notify maps since those should
+	// also be bounded in a similar way.
+	defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
+type Tree struct {
+	root *Node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	t := &Tree{
+		root: &Node{
+			mutateCh: make(chan struct{}),
+		},
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// Txn is a transaction on the tree. This transaction is applied
+// atomically and returns a new tree when committed. A transaction
+// is not thread safe, and should only be used by a single goroutine.
+type Txn struct {
+	// root is the modified root for the transaction.
+	root *Node
+
+	// snap is a snapshot of the root node for use if we have to run the
+	// slow notify algorithm.
+	snap *Node
+
+	// size tracks the size of the tree as it is modified during the
+	// transaction.
+	size int
+
+	// writable is a cache of writable nodes that have been created during
+	// the course of the transaction. This allows us to re-use the same
+	// nodes for further writes and avoid unnecessary copies of nodes that
+	// have never been exposed outside the transaction. This will only hold
+	// up to defaultModifiedCache number of entries.
+	writable *simplelru.LRU
+
+	// trackChannels is used to hold channels that need to be notified to
+	// signal mutation of the tree. This will only hold up to
+	// defaultModifiedCache number of entries, after which we will set the
+	// trackOverflow flag, which will cause us to use a more expensive
+	// algorithm to perform the notifications. Mutation tracking is only
+	// performed if trackMutate is true.
+	trackChannels map[chan struct{}]struct{}
+	trackOverflow bool
+	trackMutate   bool
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
+func (t *Tree) Txn() *Txn {
+	txn := &Txn{
+		root: t.root,
+		snap: t.root,
+		size: t.size,
+	}
+	return txn
+}
+
+// Clone makes an independent copy of the transaction. The new transaction
+// does not track any nodes and has TrackMutate turned off. The cloned
+// transaction will contain any uncommitted writes in the original
+// transaction, but further mutations to either will be independent and will
+// result in different radix trees on Commit. A cloned transaction may be
+// passed to another goroutine and mutated there independently; however, each
+// transaction may only be mutated in a single thread.
+func (t *Txn) Clone() *Txn {
+	// Reset the writable node cache to avoid leaking future writes into the clone.
+	t.writable = nil
+
+	txn := &Txn{
+		root: t.root,
+		snap: t.snap,
+		size: t.size,
+	}
+	return txn
+}
+
+// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
+// then notifications will be issued for affected internal nodes and leaves when
+// the transaction is committed.
+func (t *Txn) TrackMutate(track bool) {
+	t.trackMutate = track
+}
+
+// trackChannel safely attempts to track the given mutation channel, setting the
+// overflow flag if we can no longer track any more. This limits the amount of
+// state that will accumulate during a transaction, and we have a slower
+// algorithm to switch to if we overflow.
+func (t *Txn) trackChannel(ch chan struct{}) {
+	// In overflow, make sure we don't store any more objects.
+	if t.trackOverflow {
+		return
+	}
+
+	// If this would overflow the state we reject it and set the flag (since
+	// we aren't tracking everything that's required any longer).
+	if len(t.trackChannels) >= defaultModifiedCache {
+		// Mark that we are in the overflow state
+		t.trackOverflow = true
+
+		// Clear the map so that the channels can be garbage collected. It is
+		// safe to do this since we have already overflowed and will be using
+		// the slow notify algorithm.
+		t.trackChannels = nil
+		return
+	}
+
+	// Create the map on the fly when we need it.
+	if t.trackChannels == nil {
+		t.trackChannels = make(map[chan struct{}]struct{})
+	}
+
+	// Otherwise we are good to track it.
+	t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified. If the current node has already been
+// modified during the course of the transaction, it is used in-place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
+	// Ensure the writable set exists.
+	if t.writable == nil {
+		lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+		if err != nil {
+			panic(err)
+		}
+		t.writable = lru
+	}
+
+	// If this node has already been modified, we can continue to use it
+	// during this transaction. We know that we don't need to track it for
+	// a node update since the node is writable, but if this is for a leaf
+	// update we track it, in case the initial write to this node didn't
+	// update the leaf.
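+	// (The writable LRU is keyed by the node pointer itself, so this check
+	// is a cheap identity lookup.)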
+ if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
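+	// The child's prefix is appended to this node's prefix, so the collapsed
+	// node matches exactly the same keys as the old two-node chain.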
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add it to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+		// Copy the pointer in case we are in a transaction that already
+		// modified this node since the node will be reused. Any changes
+		// made to the node will not affect returning the original leaf
+		// value.
+		oldLeaf := n.leaf
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, oldLeaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below.
+	// This is pretty subtle, so be careful if you change any of the logic
+	// here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of all keys under the given prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search
+	// term, or the search term starts with the child node's prefix. We need
+	// to do both so that we can delete prefixes that don't correspond to any
+	// node in the tree.
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if !didUpdate {
+		t.size++
+	}
+	return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+	newRoot, leaf := t.delete(nil, t.root, k)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if leaf != nil {
+		t.size--
+		return leaf.val, true
+	}
+	return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix.
+// This will delete all nodes under that prefix.
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+	if newRoot != nil {
+		t.root = newRoot
+		t.size = t.size - numDeletions
+		return true
+	}
+	return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
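+// (Get and GetWatch below are equivalent to calling the same methods on the
+// node returned by Root.)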
+func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
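+// Because the tree is immutable, the receiver is left untouched and remains
+// a valid snapshot. A minimal sketch:
+//
+//	t := iradix.New()
+//	t2, _, _ := t.Insert([]byte("foo"), 1)
+//	// t.Len() is still 0; t2.Len() is 1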
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. +func (t *Tree) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go new file mode 100644 index 000000000..f17d0a644 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -0,0 +1,205 @@ +package iradix + +import ( + "bytes" +) + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator struct { + node *Node + stack []edges +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +func (i *Iterator) recurseMin(n *Node) *Node { + // Traverse to the minimum child + if n.leaf != nil { + return n + } + nEdges := len(n.edges) + if nEdges > 1 { + // Add all the other edges to the stack (the min node will be added as + // we recurse) + i.stack = append(i.stack, n.edges[1:]) + } + if nEdges > 0 { + return i.recurseMin(n.edges[0].node) + } + // Shouldn't be possible + return nil +} + +// SeekLowerBound is used to seek the iterator to the smallest key that is +// greater or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. 
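+// Combined with Next, this supports range scans: seek to the lower bound and
+// iterate until the key reaches the upper bound (see the README for a worked
+// example).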
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound. Note that the iterator will still recurse into
+	// children that we don't traverse on the way to the lower bound as it
+	// walks the stack.
+	i.stack = []edges{}
+	// i.node starts off in the common case as pointing to the root node of the
+	// tree. By the time we return we have either found a lower bound and setup
+	// the stack to traverse all larger keys, or we have not and the stack and
+	// node should both be nil to prevent the iterator from assuming it is just
+	// iterating the whole tree from the root node. Either way this needs to end
+	// up as nil so just set it here.
+	n := i.node
+	i.node = nil
+	search := key
+
+	found := func(n *Node) {
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	findMin := func(n *Node) {
+		n = i.recurseMin(n)
+		if n != nil {
+			found(n)
+			return
+		}
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger, that means the lower bound is greater than the search
+			// and from now on we need to follow the minimum path to the smallest
+			// leaf under this subtree.
+			findMin(n)
+			return
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower
+			// bound
+			i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf and an exact match we're done.
+		if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
+			found(n)
+			return
+		}
+
+		// Consume the search prefix if the current node has one. Note that this is
+		// safe because if n.prefix is longer than the search slice prefixCmp would
+		// have been > 0 above and the method would have already returned.
+		search = search[len(n.prefix):]
+
+		if len(search) == 0 {
+			// We've exhausted the search key, but the current node is not an exact
+			// match or not a leaf. That means that the leaf value, if it exists,
+			// and all child nodes must be strictly greater, so the smallest key in
+			// this subtree must be the lower bound.
+			findMin(n)
+			return
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+		if lbNode == nil {
+			return
+		}
+
+		// Create stack edges for all the strictly higher edges in this node.
+ if idx+1 < len(n.edges) { + i.stack = append(i.stack, n.edges[idx+1:]) + } + + // Recurse + n = lbNode + } +} + +// Next returns the next node in order +func (i *Iterator) Next() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges{ + { + edge{node: i.node}, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 000000000..359854808 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,334 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + // we want lower bound behavior so return even if it's not an exact match + if idx < num { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
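+// For example, with keys "foo" and "foobar" in the tree,
+// LongestPrefix([]byte("foozip")) returns the entry for "foo" (see the
+// README example).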
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+	return NewReverseIterator(n)
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkBackwards is used to walk the tree in reverse order
+func (n *Node) WalkBackwards(fn WalkFn) {
+	reverseRecursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// reverseRecursiveWalk is used to do a reverse pre-order +// walk of a node recursively. Returns true if the walk +// should be aborted +func reverseRecursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children in reverse order + for i := len(n.edges) - 1; i >= 0; i-- { + e := n.edges[i] + if reverseRecursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 000000000..3c6a22525 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + { + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go new file mode 100644 index 000000000..554fa7129 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go @@ -0,0 +1,239 @@ +package iradix + +import ( + "bytes" +) + +// ReverseIterator is used to iterate over a set of nodes +// in reverse in-order +type ReverseIterator struct { + i *Iterator + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. 
This can happen during seek or during + // iteration. + // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. + expandedParents map[*Node]struct{} +} + +// NewReverseIterator returns a new ReverseIterator at a node +func NewReverseIterator(n *Node) *ReverseIterator { + return &ReverseIterator{ + i: &Iterator{node: n}, + } +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + return ri.i.SeekPrefixWatch(prefix) +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (ri *ReverseIterator) SeekPrefix(prefix []byte) { + ri.i.SeekPrefixWatch(prefix) +} + +// SeekReverseLowerBound is used to seek the iterator to the largest key that is +// lower or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + ri.i.stack = []edges{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := ri.i.node + ri.i.node = nil + search := key + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + found := func(n *Node) { + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). + ri.expandedParents[n] = struct{}{} + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. 
Note that this is exactly what the iterator will already do
+		// if it finds a node in the stack that has _not_ been marked as expanded
+		// so in this one case we don't call `found` and instead let the iterator
+		// do the expansion and recursion through all the children.
+		ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+		return
+	}
+
+	if prefixCmp > 0 {
+		// Prefix is larger than search prefix, or there is no prefix but we've
+		// also exhausted the search key. Either way, that means there is no
+		// reverse lower bound since nothing comes before our current search
+		// prefix.
+		return
+	}
+
+	// If this is a leaf, something needs to happen! Note that if it's a leaf
+	// and prefixCmp was zero (which it must be to get here) then the leaf value
+	// is either an exact match for the search, or it's lower. It can't be
+	// greater.
+	if n.isLeaf() {
+
+		// Firstly, if it's an exact match, we're done!
+		if bytes.Equal(n.leaf.key, key) {
+			found(n)
+			return
+		}
+
+		// It's not, so this node's leaf value must be lower and could still be a
+		// valid contender for reverse lower bound.
+
+		// If it has no children then we are also done.
+		if len(n.edges) == 0 {
+			// This leaf is the lower bound.
+			found(n)
+			return
+		}
+
+		// Finally, this leaf is internal (has children) so we'll keep searching,
+		// but we need to add it to the iterator's stack since it has a leaf value
+		// that needs to be iterated over. It needs to be added to the stack
+		// before its children below as it comes first.
+		ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+		// We also need to mark it as expanded since we'll be adding any of its
+		// relevant children below and so don't want the iterator to re-add them
+		// on its way back up the stack.
+		ri.expandedParents[n] = struct{}{}
+	}
+
+	// Consume the search prefix. Note that this is safe because if n.prefix is
+	// longer than the search slice prefixCmp would have been > 0 above and the
+	// method would have already returned.
+	search = search[len(n.prefix):]
+
+	if len(search) == 0 {
+		// We've exhausted the search key but we are not at a leaf. That means all
+		// children are greater than the search key so a reverse lower bound
+		// doesn't exist in this subtree. Note that there might still be one in
+		// the whole radix tree by following a different path somewhere further
+		// up. If that's the case then the iterator's stack will contain all the
+		// smaller nodes already and Previous will walk through them correctly.
+		return
+	}
+
+	// Otherwise, take the lower bound next edge.
+	idx, lbNode := n.getLowerBoundEdge(search[0])
+
+	// From here, we need to update the stack with all values lower than
+	// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
+	// search prefix is larger than all edges, we need to place idx at the
+	// last edge index so they can all be placed in the stack, since they
+	// come before our search prefix.
+	if idx == -1 {
+		idx = len(n.edges)
+	}
+
+	// Create stack edges for all the strictly lower edges in this node.
+	if len(n.edges[:idx]) > 0 {
+		ri.i.stack = append(ri.i.stack, n.edges[:idx])
+	}
+
+	// Exit if there's no lower bound edge. The stack will have the previous
+	// nodes already.
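+	// For example (an illustrative sketch, not from the source): with keys
+	// "a", "ab", and "b" in the tree, SeekReverseLowerBound([]byte("abc"))
+	// leaves the stack positioned so that Previous yields "ab", then "a".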
+ if lbNode == nil { + return + } + + // Recurse + n = lbNode + } +} + +// Previous returns the previous node in reverse order +func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if ri.i.stack == nil && ri.i.node != nil { + ri.i.stack = []edges{ + { + edge{node: ri.i.node}, + }, + } + } + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + for len(ri.i.stack) > 0 { + // Inspect the last element of the stack + n := len(ri.i.stack) + last := ri.i.stack[n-1] + m := len(last) + elem := last[m-1].node + + _, alreadyExpanded := ri.expandedParents[elem] + + // If this is an internal node and we've not seen it already, we need to + // leave it in the stack so we can return its possible leaf value _after_ + // we've recursed through all its children. + if len(elem.edges) > 0 && !alreadyExpanded { + // record that we've seen this node! + ri.expandedParents[elem] = struct{}{} + // push child edges onto stack and skip the rest of the loop to recurse + // into the largest one. + ri.i.stack = append(ri.i.stack, elem.edges) + continue + } + + // Remove the node from the stack + if m > 1 { + ri.i.stack[n-1] = last[:m-1] + } else { + ri.i.stack = ri.i.stack[:n-1] + } + // We don't need this state any more as it's no longer in the stack so we + // won't visit it again + if alreadyExpanded { + delete(ri.expandedParents, elem) + } + + // If this is a leaf, return it + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + + // it's not a leaf so keep walking the stack to find the previous leaf + } + return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-memdb/.gitignore b/vendor/github.com/hashicorp/go-memdb/.gitignore new file mode 100644 index 000000000..11b90db8d --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea diff --git a/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/hashicorp/go-memdb/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/hashicorp/go-memdb/README.md new file mode 100644 index 000000000..080b7447b --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/README.md @@ -0,0 +1,146 @@ +# go-memdb [![CircleCI](https://circleci.com/gh/hashicorp/go-memdb/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-memdb/tree/master) + +Provides the `memdb` package that implements a simple in-memory database +built on immutable radix trees. The database provides Atomicity, Consistency +and Isolation from ACID. Being that it is in-memory, it does not provide durability. +The database is instantiated with a schema that specifies the tables and indices +that exist and allows transactions to be executed. + +The database provides the following: + +* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees + the database is able to support any number of concurrent readers without locking, + and allows a writer to make progress. + +* Transaction Support - The database allows for rich transactions, in which multiple + objects are inserted, updated or deleted. The transactions can span multiple tables, + and are applied atomically. The database provides atomicity and isolation in ACID + terminology, such that until commit the updates are not visible. + +* Rich Indexing - Tables can support any number of indexes, which can be simple like + a single field index, or more advanced compound field indexes. Certain types like + UUID can be efficiently compressed from strings into byte indexes for reduced + storage requirements. + +* Watches - Callers can populate a watch set as part of a query, which can be used to + detect when a modification has been made to the database which affects the query + results. This lets callers easily watch for changes in the database in a very general + way. + +For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). + +Documentation +============= + +The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb). 
+ +Example +======= + +Below is a [simple example](https://play.golang.org/p/gCGE9FA4og1) of usage + +```go +// Create a sample struct +type Person struct { + Email string + Name string + Age int +} + +// Create the DB schema +schema := &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + "person": &memdb.TableSchema{ + Name: "person", + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Email"}, + }, + "age": &memdb.IndexSchema{ + Name: "age", + Unique: false, + Indexer: &memdb.IntFieldIndex{Field: "Age"}, + }, + }, + }, + }, +} + +// Create a new data base +db, err := memdb.NewMemDB(schema) +if err != nil { + panic(err) +} + +// Create a write transaction +txn := db.Txn(true) + +// Insert some people +people := []*Person{ + &Person{"joe@aol.com", "Joe", 30}, + &Person{"lucy@aol.com", "Lucy", 35}, + &Person{"tariq@aol.com", "Tariq", 21}, + &Person{"dorothy@aol.com", "Dorothy", 53}, +} +for _, p := range people { + if err := txn.Insert("person", p); err != nil { + panic(err) + } +} + +// Commit the transaction +txn.Commit() + +// Create read-only transaction +txn = db.Txn(false) +defer txn.Abort() + +// Lookup by email +raw, err := txn.First("person", "id", "joe@aol.com") +if err != nil { + panic(err) +} + +// Say hi! +fmt.Printf("Hello %s!\n", raw.(*Person).Name) + +// List all the people +it, err := txn.Get("person", "id") +if err != nil { + panic(err) +} + +fmt.Println("All the people:") +for obj := it.Next(); obj != nil; obj = it.Next() { + p := obj.(*Person) + fmt.Printf(" %s\n", p.Name) +} + +// Range scan over people with ages between 25 and 35 inclusive +it, err = txn.LowerBound("person", "age", 25) +if err != nil { + panic(err) +} + +fmt.Println("People aged 25 - 35:") +for obj := it.Next(); obj != nil; obj = it.Next() { + p := obj.(*Person) + if p.Age > 35 { + break + } + fmt.Printf(" %s is aged %d\n", p.Name, p.Age) +} +// Output: +// Hello Joe! +// All the people: +// Dorothy +// Joe +// Lucy +// Tariq +// People aged 25 - 35: +// Joe is aged 30 +// Lucy is aged 35 +``` + diff --git a/vendor/github.com/hashicorp/go-memdb/changes.go b/vendor/github.com/hashicorp/go-memdb/changes.go new file mode 100644 index 000000000..35089f5ce --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/changes.go @@ -0,0 +1,34 @@ +package memdb + +// Changes describes a set of mutations to memDB tables performed during a +// transaction. +type Changes []Change + +// Change describes a mutation to an object in a table. +type Change struct { + Table string + Before interface{} + After interface{} + + // primaryKey stores the raw key value from the primary index so that we can + // de-duplicate multiple updates of the same object in the same transaction + // but we don't expose this implementation detail to the consumer. + primaryKey []byte +} + +// Created returns true if the mutation describes a new object being inserted. +func (m *Change) Created() bool { + return m.Before == nil && m.After != nil +} + +// Updated returns true if the mutation describes an existing object being +// updated. +func (m *Change) Updated() bool { + return m.Before != nil && m.After != nil +} + +// Deleted returns true if the mutation describes an existing object being +// deleted. 
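+// For instance (an illustrative note): a Change{Table: "person",
+// Before: old, After: nil} reports Deleted() == true, while Created()
+// and Updated() both report false.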
+func (m *Change) Deleted() bool {
+	return m.Before != nil && m.After == nil
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/hashicorp/go-memdb/filter.go
new file mode 100644
index 000000000..0071ab311
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/filter.go
@@ -0,0 +1,38 @@
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+	// filter is the filter function applied over the base iterator.
+	filter FilterFunc
+
+	// iter is the iterator that is being wrapped.
+	iter ResultIterator
+}
+
+// NewFilterIterator wraps a ResultIterator. The filter function is applied
+// to each value returned by a call to iter.Next.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned FilterIterator.
+func NewFilterIterator(iter ResultIterator, filter FilterFunc) *FilterIterator {
+	return &FilterIterator{
+		filter: filter,
+		iter:   iter,
+	}
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator.
+func (f *FilterIterator) Next() interface{} {
+	for {
+		if value := f.iter.Next(); value == nil || !f.filter(value) {
+			return value
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 000000000..172a0e86b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,931 @@
+package memdb
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
+type Indexer interface {
+	// FromArgs is called to build the exact index key from a list of arguments.
+	FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object
+type SingleIndexer interface {
+	// FromObject extracts the index value from an object. The return values
+	// are whether the index value was found, the index value, and any error
+	// while extracting the index value, respectively.
+	FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+	// FromObject extracts index values from an object. The return values
+	// are the same as a SingleIndexer except there can be multiple index
+	// values.
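+	// For example (illustrative): a first/last-name indexer over a value
+	// with First "Ada" and Last "Lovelace" could return the two index
+	// values "Ada" and "Lovelace", each pointing at the same object.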
+ FromObject(raw interface{}) (bool, [][]byte, error) +} + +// PrefixIndexer is an optional interface on top of an Indexer that allows +// indexes to support prefix-based iteration. +type PrefixIndexer interface { + // PrefixFromArgs is the same as FromArgs for an Indexer except that + // the index value returned should return all prefix-matched values. + PrefixFromArgs(args ...interface{}) ([]byte, error) +} + +// StringFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field. +type StringFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + isPtr := fv.Kind() == reflect.Ptr + fv = reflect.Indirect(fv) + if !isPtr && !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid %v ", s.Field, obj, isPtr) + } + + if isPtr && !fv.IsValid() { + val := "" + return false, []byte(val), nil + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + return true, []byte(val), nil +} + +func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringSliceFieldIndex builds an index from a field on an object that is a +// string slice ([]string). Each value within the string slice can be used for +// lookup. 
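+// For example (illustrative): a value whose indexed field is
+// []string{"red", "blue"} yields the index keys "red\x00" and "blue\x00",
+// so a lookup on either element finds the same object.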
+type StringSliceFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String { + return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for i := 0; i < fv.Len(); i++ { + val := fv.Index(i).String() + if val == "" { + continue + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + vals = append(vals, []byte(val)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringMapFieldIndex is used to extract a field of type map[string]string +// from an object using reflection and builds an index on that field. +// +// Note that although FromArgs in theory supports using either one or +// two arguments, there is a bug: FromObject only creates an index +// using key/value, and does not also create an index using key. This +// means a lookup using one argument will never actually work. +// +// It is currently left as-is to prevent backwards compatibility +// issues. +// +// TODO: Fix this in the next major bump. +type StringMapFieldIndex struct { + Field string + Lowercase bool +} + +var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind() + +func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != MapType { + return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for _, key := range fv.MapKeys() { + k := key.String() + if k == "" { + continue + } + val := fv.MapIndex(key).String() + + if s.Lowercase { + k = strings.ToLower(k) + val = strings.ToLower(val) + } + + // Add the null character as a terminator + k += "\x00" + val + "\x00" + + vals = append(vals, []byte(k)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +// WARNING: Because of a bug in FromObject, this function will never return +// a value when using the single-argument version. 
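+// For example (illustrative): for a map entry {"env": "prod"}, FromObject
+// stores only the combined key "env\x00prod\x00", while FromArgs("env")
+// builds "env\x00", which matches no stored key exactly.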
+func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) > 2 || len(args) == 0 { + return nil, fmt.Errorf("must provide one or two arguments") + } + key, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + key = strings.ToLower(key) + } + // Add the null character as a terminator + key += "\x00" + + if len(args) == 2 { + val, ok := args[1].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[1]) + } + if s.Lowercase { + val = strings.ToLower(val) + } + // Add the null character as a terminator + key += val + "\x00" + } + + return []byte(key), nil +} + +// IntFieldIndex is used to extract an int field from an object using +// reflection and builds an index on that field. +type IntFieldIndex struct { + Field string +} + +func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(i.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsIntType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k) + } + + // Get the value and encode it + val := fv.Int() + buf := encodeInt(val, size) + + return true, buf, nil +} + +func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsIntType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a int", k) + } + + val := v.Int() + buf := encodeInt(val, size) + + return buf, nil +} + +func encodeInt(val int64, size int) []byte { + buf := make([]byte, size) + + // This bit flips the sign bit on any sized signed twos-complement integer, + // which when truncated to a uint of the same size will bias the value such + // that the maximum negative int becomes 0, and the maximum positive int + // becomes the maximum positive uint. + scaled := val ^ int64(-1<<(size*8-1)) + + switch size { + case 1: + buf[0] = uint8(scaled) + case 2: + binary.BigEndian.PutUint16(buf, uint16(scaled)) + case 4: + binary.BigEndian.PutUint32(buf, uint32(scaled)) + case 8: + binary.BigEndian.PutUint64(buf, uint64(scaled)) + default: + panic(fmt.Sprintf("unsupported int size parameter: %d", size)) + } + + return buf +} + +// IsIntType returns whether the passed type is a type of int and the number +// of bytes needed to encode the type. +func IsIntType(k reflect.Kind) (size int, okay bool) { + switch k { + case reflect.Int: + return strconv.IntSize / 8, true + case reflect.Int8: + return 1, true + case reflect.Int16: + return 2, true + case reflect.Int32: + return 4, true + case reflect.Int64: + return 8, true + default: + return 0, false + } +} + +// UintFieldIndex is used to extract a uint field from an object using +// reflection and builds an index on that field. 
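+// Unlike the signed case above, no sign-bit bias is needed here: plain
+// big-endian encoding already sorts unsigned values correctly. For
+// example, uint16 values 1, 2, and 256 encode as 0x0001, 0x0002, and
+// 0x0100, which compare in the same order as the integers themselves.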
+type UintFieldIndex struct {
+	Field string
+}
+
+func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(u.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	size, ok := IsUintType(k)
+	if !ok {
+		return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k)
+	}
+
+	// Get the value and encode it
+	val := fv.Uint()
+	buf := encodeUInt(val, size)
+
+	return true, buf, nil
+}
+
+func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+
+	v := reflect.ValueOf(args[0])
+	if !v.IsValid() {
+		return nil, fmt.Errorf("%#v is invalid", args[0])
+	}
+
+	k := v.Kind()
+	size, ok := IsUintType(k)
+	if !ok {
+		return nil, fmt.Errorf("arg is of type %v; want a uint", k)
+	}
+
+	val := v.Uint()
+	buf := encodeUInt(val, size)
+
+	return buf, nil
+}
+
+func encodeUInt(val uint64, size int) []byte {
+	buf := make([]byte, size)
+
+	switch size {
+	case 1:
+		buf[0] = uint8(val)
+	case 2:
+		binary.BigEndian.PutUint16(buf, uint16(val))
+	case 4:
+		binary.BigEndian.PutUint32(buf, uint32(val))
+	case 8:
+		binary.BigEndian.PutUint64(buf, val)
+	default:
+		panic(fmt.Sprintf("unsupported uint size parameter: %d", size))
+	}
+
+	return buf
+}
+
+// IsUintType returns whether the passed type is a type of uint and the number
+// of bytes needed to encode the type.
+func IsUintType(k reflect.Kind) (size int, okay bool) {
+	switch k {
+	case reflect.Uint:
+		return strconv.IntSize / 8, true
+	case reflect.Uint8:
+		return 1, true
+	case reflect.Uint16:
+		return 2, true
+	case reflect.Uint32:
+		return 4, true
+	case reflect.Uint64:
+		return 8, true
+	default:
+		return 0, false
+	}
+}
+
+// BoolFieldIndex is used to extract a boolean field from an object using
+// reflection and builds an index on that field.
+type BoolFieldIndex struct {
+	Field string
+}
+
+func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(i.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	if k != reflect.Bool {
+		return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k)
+	}
+
+	// Get the value and encode it
+	buf := make([]byte, 1)
+	if fv.Bool() {
+		buf[0] = 1
+	}
+
+	return true, buf, nil
+}
+
+func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromBoolArgs(args)
+}
+
+// UUIDFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field by treating
+// it as a UUID. This is an optimization to using a StringFieldIndex
+// as the UUID can be more compactly represented in byte form.
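+// For example (illustrative): a UUID's 36-character string form (37 bytes
+// as a null-terminated StringFieldIndex key) is stored instead as its 16
+// raw bytes, less than half the size.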
+type UUIDFieldIndex struct { + Field string +} + +func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + buf, err := u.parseString(val, true) + return true, buf, err +} + +func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, true) + case []byte: + if len(arg) != 16 { + return nil, fmt.Errorf("byte slice must be 16 characters") + } + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, false) + case []byte: + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +// parseString parses a UUID from the string. If enforceLength is false, it will +// parse a partial UUID. An error is returned if the input, stripped of hyphens, +// is not even length. +func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) { + // Verify the length + l := len(s) + if enforceLength && l != 36 { + return nil, fmt.Errorf("UUID must be 36 characters") + } else if l > 36 { + return nil, fmt.Errorf("Invalid UUID length. UUID have 36 characters; got %d", l) + } + + hyphens := strings.Count(s, "-") + if hyphens > 4 { + return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens) + } + + // The sanitized length is the length of the original string without the "-". + sanitized := strings.Replace(s, "-", "", -1) + sanitizedLength := len(sanitized) + if sanitizedLength%2 != 0 { + return nil, fmt.Errorf("Input (without hyphens) must be even length") + } + + dec, err := hex.DecodeString(sanitized) + if err != nil { + return nil, fmt.Errorf("Invalid UUID: %v", err) + } + + return dec, nil +} + +// FieldSetIndex is used to extract a field from an object using reflection and +// builds an index on whether the field is set by comparing it against its +// type's nil value. +type FieldSetIndex struct { + Field string +} + +func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(f.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj) + } + + if fv.Interface() == reflect.Zero(fv.Type()).Interface() { + return true, []byte{0}, nil + } + + return true, []byte{1}, nil +} + +func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// ConditionalIndex builds an index based on a condition specified by a passed +// user function. This function may examine the passed object and return a +// boolean to encapsulate an arbitrarily complex conditional. 
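+// For example (a hedged sketch, using a hypothetical Person type):
+//
+//	adult := &ConditionalIndex{
+//		Conditional: func(obj interface{}) (bool, error) {
+//			p, ok := obj.(*Person)
+//			return ok && p.Age >= 18, nil
+//		},
+//	}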
+type ConditionalIndex struct { + Conditional ConditionalIndexFunc +} + +// ConditionalIndexFunc is the required function interface for a +// ConditionalIndex. +type ConditionalIndexFunc func(obj interface{}) (bool, error) + +func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) { + // Call the user's function + res, err := c.Conditional(obj) + if err != nil { + return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err) + } + + if res { + return true, []byte{1}, nil + } + + return true, []byte{0}, nil +} + +func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// fromBoolArgs is a helper that expects only a single boolean argument and +// returns a single length byte array containing either a one or zero depending +// on whether the passed input is true or false respectively. +func fromBoolArgs(args []interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + if val, ok := args[0].(bool); !ok { + return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0]) + } else if val { + return []byte{1}, nil + } + + return []byte{0}, nil +} + +// CompoundIndex is used to build an index using multiple sub-indexes +// Prefix based iteration is supported as long as the appropriate prefix +// of indexers support it. All sub-indexers are only assumed to expect +// a single argument. +type CompoundIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // the CompoundIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) { + var out []byte + for i, idxRaw := range c.Indexes { + idx, ok := idxRaw.(SingleIndexer) + if !ok { + return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer") + } + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break + } else { + return false, nil, nil + } + } + out = append(out, val...) + } + return true, out, nil +} + +func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != len(c.Indexes) { + return nil, fmt.Errorf("non-equivalent argument count and index fields") + } + var out []byte + for i, arg := range args { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + return out, nil +} + +func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) > len(c.Indexes) { + return nil, fmt.Errorf("more arguments than index fields") + } + var out []byte + for i, arg := range args { + if i+1 < len(args) { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } else { + prefixIndexer, ok := c.Indexes[i].(PrefixIndexer) + if !ok { + return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i) + } + val, err := prefixIndexer.PrefixFromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) 
+ } + } + return out, nil +} + +// CompoundMultiIndex is used to build an index using multiple +// sub-indexes. +// +// Unlike CompoundIndex, CompoundMultiIndex can have both +// SingleIndexer and MultiIndexer sub-indexers. However, each +// MultiIndexer adds considerable overhead/complexity in terms of +// the number of indexes created under-the-hood. It is not suggested +// to use more than one or two, if possible. +// +// Another change from CompoundIndexer is that if AllowMissing is +// set, not only is it valid to have empty index fields, but it will +// still create index values up to the first empty index. This means +// that if you have a value with an empty field, rather than using a +// prefix for lookup, you can simply pass in less arguments. As an +// example, if {Foo, Bar} is indexed but Bar is missing for a value +// and AllowMissing is set, an index will still be created for {Foo} +// and it is valid to do a lookup passing in only Foo as an argument. +// Note that the ordering isn't guaranteed -- it's last-insert wins, +// but this is true if you have two objects that have the same +// indexes not using AllowMissing anyways. +// +// Because StringMapFieldIndexers can take a varying number of args, +// it is currently a requirement that whenever it is used, two +// arguments must _always_ be provided for it. In theory we only +// need one, except a bug in that indexer means the single-argument +// version will never work. You can leave the second argument nil, +// but it will never produce a value. We support this for whenever +// that bug is fixed, likely in a next major version bump. +// +// Prefix-based indexing is not currently supported. +type CompoundMultiIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // CompoundMultiIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) { + // At each entry, builder is storing the results from the next index + builder := make([][][]byte, 0, len(c.Indexes)) + +forloop: + // This loop goes through each indexer and adds the value(s) provided to the next + // entry in the slice. We can then later walk it like a tree to construct the indices. + for i, idxRaw := range c.Indexes { + switch idx := idxRaw.(type) { + case SingleIndexer: + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + builder = append(builder, [][]byte{val}) + + case MultiIndexer: + ok, vals, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + + // Add each of the new values to each of the old values + builder = append(builder, vals) + + default: + return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i) + } + } + + // Start with something higher to avoid resizing if possible + out := make([][]byte, 0, len(c.Indexes)^3) + + // We are walking through the builder slice essentially in a depth-first fashion, + // building the prefix and leaves as we go. 
If AllowMissing is false, we only insert + // these full paths to leaves. Otherwise, we also insert each prefix along the way. + // This allows for lookup in FromArgs when AllowMissing is true that does not contain + // the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo + // field specified as "abc", it is valid to call FromArgs with just "abc". + var walkVals func([]byte, int) + walkVals = func(currPrefix []byte, depth int) { + if depth >= len(builder) { + return + } + + if depth == len(builder)-1 { + // These are the "leaves", so append directly + for _, v := range builder[depth] { + outcome := make([]byte, len(currPrefix)) + copy(outcome, currPrefix) + out = append(out, append(outcome, v...)) + } + return + } + for _, v := range builder[depth] { + nextPrefix := append(currPrefix, v...) + if c.AllowMissing { + out = append(out, nextPrefix) + } + walkVals(nextPrefix, depth+1) + } + } + + walkVals(nil, 0) + + return true, out, nil +} + +func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) { + var stringMapCount int + var argCount int + for _, index := range c.Indexes { + if argCount >= len(args) { + break + } + if _, ok := index.(*StringMapFieldIndex); ok { + // We require pairs for StringMapFieldIndex, but only got one + if argCount+1 >= len(args) { + return nil, errors.New("invalid number of arguments") + } + stringMapCount++ + argCount += 2 + } else { + argCount++ + } + } + argCount = 0 + + switch c.AllowMissing { + case true: + if len(args) > len(c.Indexes)+stringMapCount { + return nil, errors.New("too many arguments") + } + + default: + if len(args) != len(c.Indexes)+stringMapCount { + return nil, errors.New("number of arguments does not equal number of indexers") + } + } + + var out []byte + var val []byte + var err error + for i, idx := range c.Indexes { + if argCount >= len(args) { + // We're done; should only hit this if AllowMissing + break + } + if _, ok := idx.(*StringMapFieldIndex); ok { + if args[argCount+1] == nil { + val, err = idx.FromArgs(args[argCount]) + } else { + val, err = idx.FromArgs(args[argCount : argCount+2]...) + } + argCount += 2 + } else { + val, err = idx.FromArgs(args[argCount]) + argCount++ + } + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/hashicorp/go-memdb/memdb.go new file mode 100644 index 000000000..0508d0aae --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/memdb.go @@ -0,0 +1,116 @@ +// Package memdb provides an in-memory database that supports transactions +// and MVCC. +package memdb + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +// MemDB is an in-memory database providing Atomicity, Consistency, and +// Isolation from ACID. MemDB doesn't provide Durability since it is an +// in-memory database. +// +// MemDB provides a table abstraction to store objects (rows) with multiple +// indexes based on inserted values. The database makes use of immutable radix +// trees to provide transactions and MVCC. +// +// Objects inserted into MemDB are not copied. It is **extremely important** +// that objects are not modified in-place after they are inserted since they +// are stored directly in MemDB. It remains unsafe to modify inserted objects +// even after they've been deleted from MemDB since there may still be older +// snapshots of the DB being read from other goroutines. 
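+//
+// A minimal usage sketch (illustrative only; the "person" table, the Person
+// type and the schema value are assumptions, not part of this package):
+//
+//	db, err := NewMemDB(schema)
+//	if err != nil {
+//		panic(err)
+//	}
+//	txn := db.Txn(true) // write transaction
+//	if err := txn.Insert("person", &Person{ID: "joe"}); err != nil {
+//		panic(err)
+//	}
+//	txn.Commit()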
+type MemDB struct { + schema *DBSchema + root unsafe.Pointer // *iradix.Tree underneath + primary bool + + // There can only be a single writer at once + writer sync.Mutex +} + +// NewMemDB creates a new MemDB with the given schema. +func NewMemDB(schema *DBSchema) (*MemDB, error) { + // Validate the schema + if err := schema.Validate(); err != nil { + return nil, err + } + + // Create the MemDB + db := &MemDB{ + schema: schema, + root: unsafe.Pointer(iradix.New()), + primary: true, + } + if err := db.initialize(); err != nil { + return nil, err + } + + return db, nil +} + +// DBSchema returns schema in use for introspection. +// +// The method is intended for *read-only* debugging use cases, +// returned schema should *never be modified in-place*. +func (db *MemDB) DBSchema() *DBSchema { + return db.schema +} + +// getRoot is used to do an atomic load of the root pointer +func (db *MemDB) getRoot() *iradix.Tree { + root := (*iradix.Tree)(atomic.LoadPointer(&db.root)) + return root +} + +// Txn is used to start a new transaction in either read or write mode. +// There can only be a single concurrent writer, but any number of readers. +func (db *MemDB) Txn(write bool) *Txn { + if write { + db.writer.Lock() + } + txn := &Txn{ + db: db, + write: write, + rootTxn: db.getRoot().Txn(), + } + return txn +} + +// Snapshot is used to capture a point-in-time snapshot of the database that +// will not be affected by any write operations to the existing DB. +// +// If MemDB is storing reference-based values (pointers, maps, slices, etc.), +// the Snapshot will not deep copy those values. Therefore, it is still unsafe +// to modify any inserted values in either DB. +func (db *MemDB) Snapshot() *MemDB { + clone := &MemDB{ + schema: db.schema, + root: unsafe.Pointer(db.getRoot()), + primary: false, + } + return clone +} + +// initialize is used to setup the DB for use after creation. This should +// be called only once after allocating a MemDB. +func (db *MemDB) initialize() error { + root := db.getRoot() + for tName, tableSchema := range db.schema.Tables { + for iName := range tableSchema.Indexes { + index := iradix.New() + path := indexPath(tName, iName) + root, _, _ = root.Insert(path, index) + } + } + db.root = unsafe.Pointer(root) + return nil +} + +// indexPath returns the path from the root to the given table index +func indexPath(table, index string) []byte { + return []byte(table + "." + index) +} diff --git a/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/hashicorp/go-memdb/schema.go new file mode 100644 index 000000000..e6a9b526b --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/schema.go @@ -0,0 +1,114 @@ +package memdb + +import "fmt" + +// DBSchema is the schema to use for the full database with a MemDB instance. +// +// MemDB will require a valid schema. Schema validation can be tested using +// the Validate function. Calling this function is recommended in unit tests. +type DBSchema struct { + // Tables is the set of tables within this database. The key is the + // table name and must match the Name in TableSchema. + Tables map[string]*TableSchema +} + +// Validate validates the schema. 
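+//
+// For illustration, a minimal schema that passes validation ("person" and
+// "ID" are assumed example names; StringFieldIndex is the single-field
+// indexer defined elsewhere in this package):
+//
+//	schema := &DBSchema{
+//		Tables: map[string]*TableSchema{
+//			"person": {
+//				Name: "person",
+//				Indexes: map[string]*IndexSchema{
+//					"id": {
+//						Name:    "id",
+//						Unique:  true,
+//						Indexer: &StringFieldIndex{Field: "ID"},
+//					},
+//				},
+//			},
+//		},
+//	}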
+func (s *DBSchema) Validate() error {
+	if s == nil {
+		return fmt.Errorf("schema is nil")
+	}
+
+	if len(s.Tables) == 0 {
+		return fmt.Errorf("schema has no tables defined")
+	}
+
+	for name, table := range s.Tables {
+		if name != table.Name {
+			return fmt.Errorf("table name mis-match for '%s'", name)
+		}
+
+		if err := table.Validate(); err != nil {
+			return fmt.Errorf("table %q: %s", name, err)
+		}
+	}
+
+	return nil
+}
+
+// TableSchema is the schema for a single table.
+type TableSchema struct {
+	// Name of the table. This must match the key in the Tables map in DBSchema.
+	Name string
+
+	// Indexes is the set of indexes for querying this table. The key
+	// is a unique name for the index and must match the Name in the
+	// IndexSchema.
+	Indexes map[string]*IndexSchema
+}
+
+// Validate is used to validate the table schema
+func (s *TableSchema) Validate() error {
+	if s.Name == "" {
+		return fmt.Errorf("missing table name")
+	}
+
+	if len(s.Indexes) == 0 {
+		return fmt.Errorf("missing table indexes for '%s'", s.Name)
+	}
+
+	if _, ok := s.Indexes["id"]; !ok {
+		return fmt.Errorf("must have id index")
+	}
+
+	if !s.Indexes["id"].Unique {
+		return fmt.Errorf("id index must be unique")
+	}
+
+	if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok {
+		return fmt.Errorf("id index must be a SingleIndexer")
+	}
+
+	for name, index := range s.Indexes {
+		if name != index.Name {
+			return fmt.Errorf("index name mis-match for '%s'", name)
+		}
+
+		if err := index.Validate(); err != nil {
+			return fmt.Errorf("index %q: %s", name, err)
+		}
+	}
+
+	return nil
+}
+
+// IndexSchema is the schema for an index. An index defines how a table is
+// queried.
+type IndexSchema struct {
+	// Name of the index. This must be unique among a table's set of indexes.
+	// This must match the key in the map of Indexes for a TableSchema.
+	Name string
+
+	// AllowMissing, if true, ignores this index if it doesn't produce a
+	// value. For example, an index that extracts a field that doesn't
+	// exist from a structure.
+	AllowMissing bool
+
+	Unique  bool
+	Indexer Indexer
+}
+
+func (s *IndexSchema) Validate() error {
+	if s.Name == "" {
+		return fmt.Errorf("missing index name")
+	}
+	if s.Indexer == nil {
+		return fmt.Errorf("missing index function for '%s'", s.Name)
+	}
+	switch s.Indexer.(type) {
+	case SingleIndexer:
+	case MultiIndexer:
+	default:
+		return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go
new file mode 100644
index 000000000..951c2a1d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/txn.go
@@ -0,0 +1,1021 @@
+package memdb
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"sync/atomic"
+	"unsafe"
+
+	iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+const (
+	id = "id"
+)
+
+var (
+	// ErrNotFound is returned when the requested item is not found
+	ErrNotFound = fmt.Errorf("not found")
+)
+
+// tableIndex is a tuple of (Table, Index) used for lookups
+type tableIndex struct {
+	Table string
+	Index string
+}
+
+// Txn is a transaction against a MemDB.
+// This can be a read or write transaction.
+type Txn struct {
+	db      *MemDB
+	write   bool
+	rootTxn *iradix.Txn
+	after   []func()
+
+	// changes is used to track the changes performed during the transaction. If
+	// it is nil at transaction start then changes are not tracked.
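+	// Change tracking is enabled by calling TrackChanges, defined below.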
+	changes Changes
+
+	modified map[tableIndex]*iradix.Txn
+}
+
+// TrackChanges enables change tracking for the transaction. If called at any
+// point before commit, subsequent mutations will be recorded and can be
+// retrieved using Changes. Once this has been called on a transaction it
+// can't be unset. As with other Txn methods it's not safe to call this from a
+// different goroutine than the one making mutations or committing the
+// transaction.
+func (txn *Txn) TrackChanges() {
+	if txn.changes == nil {
+		txn.changes = make(Changes, 0, 1)
+	}
+}
+
+// readableIndex returns a transaction usable for reading the given index in a
+// table. If the transaction is a write transaction with modifications, a clone of the
+// modified index will be returned.
+func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
+	// Look for existing transaction
+	if txn.write && txn.modified != nil {
+		key := tableIndex{table, index}
+		exist, ok := txn.modified[key]
+		if ok {
+			return exist.Clone()
+		}
+	}
+
+	// Create a read transaction
+	path := indexPath(table, index)
+	raw, _ := txn.rootTxn.Get(path)
+	indexTxn := raw.(*iradix.Tree).Txn()
+	return indexTxn
+}
+
+// writableIndex returns a transaction usable for modifying the
+// given index in a table.
+func (txn *Txn) writableIndex(table, index string) *iradix.Txn {
+	if txn.modified == nil {
+		txn.modified = make(map[tableIndex]*iradix.Txn)
+	}
+
+	// Look for existing transaction
+	key := tableIndex{table, index}
+	exist, ok := txn.modified[key]
+	if ok {
+		return exist
+	}
+
+	// Start a new transaction
+	path := indexPath(table, index)
+	raw, _ := txn.rootTxn.Get(path)
+	indexTxn := raw.(*iradix.Tree).Txn()
+
+	// If we are the primary DB, enable mutation tracking. Snapshots should
+	// not notify, otherwise we will trigger watches on the primary DB when
+	// the writes will not be visible.
+	indexTxn.TrackMutate(txn.db.primary)
+
+	// Keep this open for the duration of the txn
+	txn.modified[key] = indexTxn
+	return indexTxn
+}
+
+// Abort is used to cancel this transaction.
+// This is a no-op for read transactions and for transactions
+// that have already been aborted or committed.
+func (txn *Txn) Abort() {
+	// Noop for a read transaction
+	if !txn.write {
+		return
+	}
+
+	// Check if already aborted or committed
+	if txn.rootTxn == nil {
+		return
+	}
+
+	// Clear the txn
+	txn.rootTxn = nil
+	txn.modified = nil
+	txn.changes = nil
+
+	// Release the writer lock since this is invalid
+	txn.db.writer.Unlock()
+}
+
+// Commit is used to finalize this transaction.
+// This is a no-op for read transactions and for transactions
+// that have already been aborted or committed.
+func (txn *Txn) Commit() {
+	// Noop for a read transaction
+	if !txn.write {
+		return
+	}
+
+	// Check if already aborted or committed
+	if txn.rootTxn == nil {
+		return
+	}
+
+	// Commit each sub-transaction scoped to (table, index)
+	for key, subTxn := range txn.modified {
+		path := indexPath(key.Table, key.Index)
+		final := subTxn.CommitOnly()
+		txn.rootTxn.Insert(path, final)
+	}
+
+	// Update the root of the DB
+	newRoot := txn.rootTxn.CommitOnly()
+	atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
+
+	// Now issue all of the mutation updates (this is safe to call
+	// even if mutation tracking isn't enabled); we do this after
+	// the root pointer is swapped so that waking responders will
+	// see the new state.
+ for _, subTxn := range txn.modified { + subTxn.Notify() + } + txn.rootTxn.Notify() + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() + + // Run the deferred functions, if any + for i := len(txn.after); i > 0; i-- { + fn := txn.after[i-1] + fn() + } +} + +// Insert is used to add or update an object into the given table. +// +// When updating an object, the obj provided should be a copy rather +// than a value updated in-place. Modifying values in-place that are already +// inserted into MemDB is not supported behavior. +func (txn *Txn) Insert(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot insert in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, to see if this is an update + idTxn := txn.writableIndex(table, id) + existing, update := idTxn.Get(idVal) + + // On an update, there is an existing object with the given + // primary ID. We do the update by deleting the current object + // and inserting the new object. + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Determine the new index value + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(obj) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(obj) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if ok && !indexSchema.Unique { + for i := range vals { + vals[i] = append(vals[i], idVal...) + } + } + + // Handle the update by deleting from the index first + if update { + var ( + okExist bool + valsExist [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var valExist []byte + okExist, valExist, err = indexer.FromObject(existing) + valsExist = [][]byte{valExist} + case MultiIndexer: + okExist, valsExist, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if okExist { + for i, valExist := range valsExist { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if !indexSchema.Unique { + valExist = append(valExist, idVal...) + } + + // If we are writing to the same index with the same value, + // we can avoid the delete as the insert will overwrite the + // value anyways. 
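+					// (The bounds check below also covers a MultiIndexer that
+					// now returns fewer values for the object than it did for
+					// the existing version.)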
+ if i >= len(vals) || !bytes.Equal(valExist, vals[i]) { + indexTxn.Delete(valExist) + } + } + } + } + + // If there is no index value, either this is an error or an expected + // case and we can skip updating + if !ok { + if indexSchema.AllowMissing { + continue + } else { + return fmt.Errorf("missing value for index '%s'", name) + } + } + + // Update the value of the index + for _, val := range vals { + indexTxn.Insert(val, obj) + } + } + if txn.changes != nil { + txn.changes = append(txn.changes, Change{ + Table: table, + Before: existing, // might be nil on a create + After: obj, + primaryKey: idVal, + }) + } + return nil +} + +// Delete is used to delete a single object from the given table. +// This object must already exist in the table. +func (txn *Txn) Delete(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot delete in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, check if we should continue + idTxn := txn.writableIndex(table, id) + existing, ok := idTxn.Get(idVal) + if !ok { + return ErrNotFound + } + + // Remove the object from all the indexes + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(existing) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + if txn.changes != nil { + txn.changes = append(txn.changes, Change{ + Table: table, + Before: existing, + After: nil, // Now nil indicates deletion + primaryKey: idVal, + }) + } + return nil +} + +// DeletePrefix is used to delete an entire subtree based on a prefix. +// The given index must be a prefix index, and will be used to perform a scan and enumerate the set of objects to delete. +// These will be removed from all other indexes, and then a special prefix operation will delete the objects from the given index in an efficient subtree delete operation. +// This is useful when you have a very large number of objects indexed by the given index, along with a much smaller number of entries in the other indexes for those objects. 
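+//
+// For illustration ("nodes" and the id values are assumed example names):
+//
+//	// Delete every object in table "nodes" whose id starts with "node1/".
+//	// ok reports whether anything matched and was deleted.
+//	ok, err := txn.DeletePrefix("nodes", "id_prefix", "node1/")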
+func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) { + if !txn.write { + return false, fmt.Errorf("cannot delete in read-only transaction") + } + + if !strings.HasSuffix(prefix_index, "_prefix") { + return false, fmt.Errorf("Index name for DeletePrefix must be a prefix index, Got %v ", prefix_index) + } + + deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix") + + // Get an iterator over all of the keys with the given prefix. + entries, err := txn.Get(table, prefix_index, prefix) + if err != nil { + return false, fmt.Errorf("failed kvs lookup: %s", err) + } + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return false, fmt.Errorf("invalid table '%s'", table) + } + + foundAny := false + for entry := entries.Next(); entry != nil; entry = entries.Next() { + if !foundAny { + foundAny = true + } + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(entry) + if err != nil { + return false, fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return false, fmt.Errorf("object missing primary index") + } + if txn.changes != nil { + // Record the deletion + idTxn := txn.writableIndex(table, id) + existing, ok := idTxn.Get(idVal) + if ok { + txn.changes = append(txn.changes, Change{ + Table: table, + Before: existing, + After: nil, // Now nil indicates deletion + primaryKey: idVal, + }) + } + } + // Remove the object from all the indexes except the given prefix index + for name, indexSchema := range tableSchema.Indexes { + if name == deletePrefixIndex { + continue + } + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(entry) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(entry) + } + if err != nil { + return false, fmt.Errorf("failed to build index '%s': %v", name, err) + } + + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + + } + if foundAny { + indexTxn := txn.writableIndex(table, deletePrefixIndex) + ok = indexTxn.DeletePrefix([]byte(prefix)) + if !ok { + panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix)) + } + return true, nil + } + return false, nil +} + +// DeleteAll is used to delete all the objects in a given table +// matching the constraints on the index +func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) { + if !txn.write { + return 0, fmt.Errorf("cannot delete in read-only transaction") + } + + // Get all the objects + iter, err := txn.Get(table, index, args...) 
+ if err != nil { + return 0, err + } + + // Put them into a slice so there are no safety concerns while actually + // performing the deletes + var objs []interface{} + for { + obj := iter.Next() + if obj == nil { + break + } + + objs = append(objs, obj) + } + + // Do the deletes + num := 0 + for _, obj := range objs { + if err := txn.Delete(table, obj); err != nil { + return num, err + } + num++ + } + return num, nil +} + +// FirstWatch is used to return the first matching object for +// the given constraints on the index along with the watch channel. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +// +// The watch channel is closed when a subsequent write transaction +// has updated the result of the query. Since each read transaction +// operates on an isolated snapshot, a new read transaction must be +// started to observe the changes that have been made. +// +// If the value of index ends with "_prefix", FirstWatch will perform a prefix +// match instead of full match on the index. The registered indexer must implement +// PrefixIndexer, otherwise an error is returned. +func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the first value + iter := indexTxn.Root().Iterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Next() + return watch, value, nil +} + +// LastWatch is used to return the last matching object for +// the given constraints on the index along with the watch channel. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +// +// The watch channel is closed when a subsequent write transaction +// has updated the result of the query. Since each read transaction +// operates on an isolated snapshot, a new read transaction must be +// started to observe the changes that have been made. +// +// If the value of index ends with "_prefix", LastWatch will perform a prefix +// match instead of full match on the index. The registered indexer must implement +// PrefixIndexer, otherwise an error is returned. +func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the last value + iter := indexTxn.Root().ReverseIterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Previous() + return watch, value, nil +} + +// First is used to return the first matching object for +// the given constraints on the index. 
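+// For example (hypothetical table/index/value names), txn.First("person",
+// "id", "joe") returns the object whose id is "joe", or nil if none matches.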
+// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.FirstWatch(table, index, args...) + return val, err +} + +// Last is used to return the last matching object for +// the given constraints on the index. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.LastWatch(table, index, args...) + return val, err +} + +// LongestPrefix is used to fetch the longest prefix match for the given +// constraints on the index. Note that this will not work with the memdb +// StringFieldIndex because it adds null terminators which prevent the +// algorithm from correctly finding a match (it will get to right before the +// null and fail to find a leaf node). This should only be used where the prefix +// given is capable of matching indexed entries directly, which typically only +// applies to a custom indexer. See the unit test for an example. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) { + // Enforce that this only works on prefix indexes. + if !strings.HasSuffix(index, "_prefix") { + return nil, fmt.Errorf("must use '%s_prefix' on index", index) + } + + // Get the index value. + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, err + } + + // This algorithm only makes sense against a unique index, otherwise the + // index keys will have the IDs appended to them. + if !indexSchema.Unique { + return nil, fmt.Errorf("index '%s' is not unique", index) + } + + // Find the longest prefix match with the given index. + indexTxn := txn.readableIndex(table, indexSchema.Name) + if _, value, ok := indexTxn.Root().LongestPrefix(val); ok { + return value, nil + } + return nil, nil +} + +// getIndexValue is used to get the IndexSchema and the value +// used to scan the index given the parameters. This handles prefix based +// scans when the index has the "_prefix" suffix. The index must support +// prefix iteration. +func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) { + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return nil, nil, fmt.Errorf("invalid table '%s'", table) + } + + // Check for a prefix scan + prefixScan := false + if strings.HasSuffix(index, "_prefix") { + index = strings.TrimSuffix(index, "_prefix") + prefixScan = true + } + + // Get the index schema + indexSchema, ok := tableSchema.Indexes[index] + if !ok { + return nil, nil, fmt.Errorf("invalid index '%s'", index) + } + + // Hot-path for when there are no arguments + if len(args) == 0 { + return indexSchema, nil, nil + } + + // Special case the prefix scanning + if prefixScan { + prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer) + if !ok { + return indexSchema, nil, + fmt.Errorf("index '%s' does not support prefix scanning", index) + } + + val, err := prefixIndexer.PrefixFromArgs(args...) 
+		if err != nil {
+			return indexSchema, nil, fmt.Errorf("index error: %v", err)
+		}
+		return indexSchema, val, err
+	}
+
+	// Get the exact match index
+	val, err := indexSchema.Indexer.FromArgs(args...)
+	if err != nil {
+		return indexSchema, nil, fmt.Errorf("index error: %v", err)
+	}
+	return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results from a query on a table.
+//
+// When a ResultIterator is created from a write transaction, the results from
+// Next will reflect a snapshot of the table at the time the ResultIterator is
+// created.
+// This means that calling Insert or Delete on a transaction while iterating is
+// allowed, but the changes made by Insert or Delete will not be observed in the
+// results returned from subsequent calls to Next. For example if an item is deleted
+// from the index used by the iterator it will still be returned by Next. If an
+// item is inserted into the index used by the iterator, it will not be returned
+// by Next. However, an iterator created after a call to Insert or Delete will
+// reflect the modifications.
+//
+// When a ResultIterator is created from a write transaction, and there are already
+// modifications to the index used by the iterator, the modification cache of the
+// index will be invalidated. This may result in some additional allocations if
+// the same node in the index is modified again.
+type ResultIterator interface {
+	WatchCh() <-chan struct{}
+	// Next returns the next result from the iterator. If there are no more results
+	// nil is returned.
+	Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the rows that match the
+// given constraints of an index. By default, the index values must match
+// exactly (this is not a range-based or prefix-based lookup).
+//
+// Prefix lookups: if the named index implements PrefixIndexer, you may perform
+// prefix-based lookups by appending "_prefix" to the index name. In this
+// scenario, the index values given in args are treated as prefix lookups. For
+// example, a StringFieldIndex will match any string with the given value
+// as a prefix: "mem" matches "memdb".
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// GetReverse is used to construct a Reverse ResultIterator over all the
+// rows that match the given constraints of an index. The returned
+// ResultIterator's Next() walks the index in reverse (descending key) order.
+//
+// See the documentation on Get for details on arguments.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// LowerBound is used to construct a ResultIterator over the range of
+// rows that have an index value greater than or equal to the provided args.
+// Calling this then iterating until the rows are larger than required allows
+// range scans within an index. It is not possible to watch the resulting
+// iterator since the radix tree doesn't efficiently allow watching on lower
+// bound changes. The WatchCh returned will be nil and so will block forever.
+//
+// If the value of index ends with "_prefix", LowerBound will perform a prefix match instead of
+// a full match on the index. The registered index must implement PrefixIndexer,
+// otherwise an error is returned.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	indexIter.SeekLowerBound(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter: indexIter,
+	}
+	return iter, nil
+}
+
+// ReverseLowerBound is used to construct a Reverse ResultIterator over the
+// range of rows that have an index value less than or equal to the
+// provided args. Calling this then iterating until the rows are lower than
+// required allows range scans within an index. It is not possible to watch the
+// resulting iterator since the radix tree doesn't efficiently allow watching
+// on lower bound changes. The WatchCh returned will be nil and so will block
+// forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	indexIter.SeekReverseLowerBound(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter: indexIter,
+	}
+	return iter, nil
+}
+
+// objectID is a tuple of table name and the raw internal id byte slice
+// converted to a string. It's only converted to a string to make it comparable
+// so this struct can be used as a map index.
+type objectID struct {
+	Table    string
+	IndexVal string
+}
+
+// mutInfo stores metadata about mutations to allow collapsing multiple
+// mutations to the same object into one.
+type mutInfo struct {
+	firstBefore interface{}
+	lastIdx     int
+}
+
+// Changes returns the set of object changes that have been made in the
+// transaction so far. If change tracking is not enabled it will always return
+// nil. It can be called before or after Commit. If it is before Commit it will
+// return all changes made so far which may not be the same as the final
+// Changes. After abort it will always return nil. As with other Txn methods
+// it's not safe to call this from a different goroutine than the one making
+// mutations or committing the transaction. Mutations will appear in the order
+// they were performed in the transaction but multiple operations to the same
+// object will be collapsed so only the effective overall change to that object
+// is present. If transaction operations are dependent (e.g. copy object X to Y
+// then delete X) this might mean the set of mutations is incomplete to verify
+// history, but it is complete in that the net effect is preserved (Y got a new
+// value, X got removed).
+func (txn *Txn) Changes() Changes {
+	if txn.changes == nil {
+		return nil
+	}
+
+	// De-duplicate mutations by key so all take effect at the point of the last
+	// write, but keep the mutations in order.
+	dups := make(map[objectID]mutInfo)
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		// Store the latest mutation index for each key value
+		mi, ok := dups[oid]
+		if !ok {
+			// First entry for key, store the before value
+			mi.firstBefore = m.Before
+		}
+		mi.lastIdx = i
+		dups[oid] = mi
+	}
+	if len(dups) == len(txn.changes) {
+		// No duplicates found, fast path: return it as is
+		return txn.changes
+	}
+
+	// Need to remove the duplicates
+	cs := make(Changes, 0, len(dups))
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		mi := dups[oid]
+		if mi.lastIdx == i {
+			// This was the latest value for this key; copy it with the before
+			// value in case it's different. Note that m is not a pointer so we
+			// are not modifying txn.changes here - it's already a copy.
+			m.Before = mi.firstBefore
+
+			// Edge case - if the object was inserted and then eventually deleted in
+			// the same transaction, then the net effect on that key is a no-op. Don't
+			// emit a mutation with nil for before and after as it's meaningless and
+			// might violate expectations and cause a panic in code that assumes at
+			// least one must be set.
+			if m.Before == nil && m.After == nil {
+				continue
+			}
+			cs = append(cs, m)
+		}
+	}
+	// Store the de-duped version in case this is called again
+	txn.changes = cs
+	return cs
+}
+
+func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*iradix.Iterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.Iterator()
+	return indexIter, val, nil
+}
+
+func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.ReverseIterator()
+	return indexIter, val, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+	txn.after = append(txn.after, fn)
+}
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not +// materializing the entire view. +type radixIterator struct { + iter *iradix.Iterator + watchCh <-chan struct{} +} + +func (r *radixIterator) WatchCh() <-chan struct{} { + return r.watchCh +} + +func (r *radixIterator) Next() interface{} { + _, value, ok := r.iter.Next() + if !ok { + return nil + } + return value +} + +type radixReverseIterator struct { + iter *iradix.ReverseIterator + watchCh <-chan struct{} +} + +func (r *radixReverseIterator) Next() interface{} { + _, value, ok := r.iter.Previous() + if !ok { + return nil + } + return value +} + +func (r *radixReverseIterator) WatchCh() <-chan struct{} { + return r.watchCh +} + +// Snapshot creates a snapshot of the current state of the transaction. +// Returns a new read-only transaction or nil if the transaction is already +// aborted or committed. +func (txn *Txn) Snapshot() *Txn { + if txn.rootTxn == nil { + return nil + } + + snapshot := &Txn{ + db: txn.db, + rootTxn: txn.rootTxn.Clone(), + } + + // Commit sub-transactions into the snapshot + for key, subTxn := range txn.modified { + path := indexPath(key.Table, key.Index) + final := subTxn.CommitOnly() + snapshot.rootTxn.Insert(path, final) + } + + return snapshot +} diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go new file mode 100644 index 000000000..13a4da145 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/watch.go @@ -0,0 +1,152 @@ +package memdb + +import ( + "context" + "time" +) + +// WatchSet is a collection of watch channels. The zero value is not usable. +// Use NewWatchSet to create a WatchSet. +type WatchSet map[<-chan struct{}]struct{} + +// NewWatchSet constructs a new watch set. +func NewWatchSet() WatchSet { + return make(map[<-chan struct{}]struct{}) +} + +// Add appends a watchCh to the WatchSet if non-nil. +func (w WatchSet) Add(watchCh <-chan struct{}) { + if w == nil { + return + } + + if _, ok := w[watchCh]; !ok { + w[watchCh] = struct{}{} + } +} + +// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given +// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate +// channel. It's expected that the altCh will be the same on many calls to this +// function, so you will exceed the soft limit a little bit if you hit this, but +// not by much. +// +// This is useful if you want to track individual items up to some limit, after +// which you watch a higher-level channel (usually a channel from start of +// an iterator higher up in the radix tree) that will watch a superset of items. +func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) { + // This is safe for a nil WatchSet so we don't need to check that here. + if len(w) < softLimit { + w.Add(watchCh) + } else { + w.Add(altCh) + } +} + +// Watch blocks until one of the channels in the watch set is closed, or +// timeoutCh sends a value. +// Returns true if timeoutCh is what caused Watch to unblock. 
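+//
+// A typical pattern (illustrative; "person"/"id"/"joe" are assumed example
+// values, and txn is a Txn from this package):
+//
+//	ws := NewWatchSet()
+//	watchCh, obj, err := txn.FirstWatch("person", "id", "joe")
+//	// ... handle err and use obj ...
+//	ws.Add(watchCh)
+//	timedOut := ws.Watch(time.After(10 * time.Second))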
+func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool {
+	if w == nil {
+		return false
+	}
+
+	// Create a context that gets cancelled when the timeout is triggered
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	go func() {
+		select {
+		case <-timeoutCh:
+			cancel()
+		case <-ctx.Done():
+		}
+	}()
+
+	return w.WatchCtx(ctx) == context.Canceled
+}
+
+// WatchCtx blocks until one of the channels in the watch set is closed, or
+// ctx is done (cancelled or exceeds the deadline). WatchCtx returns an error
+// if the ctx causes it to unblock, otherwise returns nil.
+//
+// WatchCtx should be preferred over Watch.
+func (w WatchSet) WatchCtx(ctx context.Context) error {
+	if w == nil {
+		return nil
+	}
+
+	if n := len(w); n <= aFew {
+		idx := 0
+		chunk := make([]<-chan struct{}, aFew)
+		for watchCh := range w {
+			chunk[idx] = watchCh
+			idx++
+		}
+		return watchFew(ctx, chunk)
+	}
+
+	return w.watchMany(ctx)
+}
+
+// watchMany is used if there are many watchers.
+func (w WatchSet) watchMany(ctx context.Context) error {
+	// Cancel all watcher goroutines when we return.
+	watcherCtx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Set up a goroutine for each watcher.
+	triggerCh := make(chan struct{}, 1)
+	watcher := func(chunk []<-chan struct{}) {
+		if err := watchFew(watcherCtx, chunk); err == nil {
+			select {
+			case triggerCh <- struct{}{}:
+			default:
+			}
+		}
+	}
+
+	// Apportion the watch channels into chunks we can feed into the
+	// watchFew helper.
+	idx := 0
+	chunk := make([]<-chan struct{}, aFew)
+	for watchCh := range w {
+		subIdx := idx % aFew
+		chunk[subIdx] = watchCh
+		idx++
+
+		// Fire off this chunk and start a fresh one.
+		if idx%aFew == 0 {
+			go watcher(chunk)
+			chunk = make([]<-chan struct{}, aFew)
+		}
+	}
+
+	// Make sure to watch any residual channels in the last chunk.
+	if idx%aFew != 0 {
+		go watcher(chunk)
+	}
+
+	// Wait for a channel to trigger or timeout.
+	select {
+	case <-triggerCh:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// WatchCh returns a channel that is used to wait for any channel of the watch
+// set to trigger or for the context to be cancelled. WatchCh creates a new
+// goroutine on each call, so callers may need to cache the returned channel to
+// avoid creating extra goroutines.
+func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
+	// Create the outgoing channel
+	triggerCh := make(chan error, 1)
+
+	// Create a goroutine to collect the error from WatchCtx
+	go func() {
+		triggerCh <- w.WatchCtx(ctx)
+	}()
+
+	return triggerCh
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/hashicorp/go-memdb/watch_few.go
new file mode 100644
index 000000000..b211eeea2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch_few.go
@@ -0,0 +1,117 @@
+package memdb
+
+//go:generate sh -c "go run watch-gen/main.go >watch_few.go"
+
+import (
+	"context"
+)
+
+// aFew gives how many watchers this function is wired to support. You must
+// always pass a full slice of this length, but unused channels can be nil.
+const aFew = 32
+
+// watchFew is used if there are only a few watchers as a performance
+// optimization.
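+// The caller always passes a chunk slice of length aFew; unused slots are
+// nil, and receiving from a nil channel blocks forever, so unused cases can
+// never fire.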
+func watchFew(ctx context.Context, ch []<-chan struct{}) error { + select { + + case <-ch[0]: + return nil + + case <-ch[1]: + return nil + + case <-ch[2]: + return nil + + case <-ch[3]: + return nil + + case <-ch[4]: + return nil + + case <-ch[5]: + return nil + + case <-ch[6]: + return nil + + case <-ch[7]: + return nil + + case <-ch[8]: + return nil + + case <-ch[9]: + return nil + + case <-ch[10]: + return nil + + case <-ch[11]: + return nil + + case <-ch[12]: + return nil + + case <-ch[13]: + return nil + + case <-ch[14]: + return nil + + case <-ch[15]: + return nil + + case <-ch[16]: + return nil + + case <-ch[17]: + return nil + + case <-ch[18]: + return nil + + case <-ch[19]: + return nil + + case <-ch[20]: + return nil + + case <-ch[21]: + return nil + + case <-ch[22]: + return nil + + case <-ch[23]: + return nil + + case <-ch[24]: + return nil + + case <-ch[25]: + return nil + + case <-ch[26]: + return nil + + case <-ch[27]: + return nil + + case <-ch[28]: + return nil + + case <-ch[29]: + return nil + + case <-ch[30]: + return nil + + case <-ch[31]: + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..a86c8539e --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,177 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..92d70934d --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,39 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. + Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 244ee19c4..af2ef6395 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -27,6 +27,16 @@ Use the links above for more information on each. 
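The simplelru files above complete the vendored cache. As a minimal usage sketch (illustrative only, not part of the diff; it assumes the package is importable at its canonical path), the API added above can be exercised like this:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The eviction callback fires whenever an entry is dropped,
	// including during Purge and Resize.
	onEvict := func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	c, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // capacity is 2, so "a" (the oldest) is evicted

	if v, ok := c.Get("b"); ok { // Get also marks "b" most recently used
		fmt.Println("b =", v)
	}
	fmt.Println("keys (oldest to newest):", c.Keys())
	fmt.Println("evicted on shrink:", c.Resize(1)) // evicts oldest entries
}
```

As the type's own doc comment notes, the cache is not thread-safe; callers are expected to provide their own locking.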
# changelog +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + * Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 @@ -36,6 +46,9 @@ Use the links above for more information on each. * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 +
+<details>
+	<summary>See changes to v1.17.x</summary>
+
 * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
 	* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
 	* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
@@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
-
+
+</details>
See changes to v1.16.x @@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv # license This code is licensed under the same conditions as the original Go code. See LICENSE file. + diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 4e92f5998..57d17eeab 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -421,7 +421,9 @@ func (d *compressor) deflateLazy() { d.h = newHuffmanEncoder(maxFlateBlockTokens) } var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { + toIndex := d.window[s.index:d.windowEnd] + toIndex = toIndex[:min(len(toIndex), maxFlateBlockTokens)] + for _, v := range toIndex { tmp[v]++ } d.h.generate(tmp[:], 15) diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 03a179697..7151140cc 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -646,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 } - numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync) + numLiterals, numOffsets := w.indexTokens(tokens, true) extraBits := 0 ssize, storable := w.storedSize(input) @@ -781,7 +781,7 @@ func (w *huffmanBitWriter) fillTokens() { // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. // The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { +func (w *huffmanBitWriter) indexTokens(t *tokens, alwaysEOB bool) (numLiterals, numOffsets int) { //copy(w.literalFreq[:], t.litHist[:]) *(*[256]uint16)(w.literalFreq[:]) = t.litHist //copy(w.literalFreq[256:], t.extraHist[:]) @@ -791,9 +791,10 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num if t.n == 0 { return } - if filled { - return maxNumLit, maxNumDist + if alwaysEOB { + w.literalFreq[endBlockMarker] = 1 } + // get the number of literals numLiterals = len(w.literalFreq) for w.literalFreq[numLiterals-1] == 0 { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 90b74f7ac..455ed3e2b 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -61,13 +61,19 @@ var bitWriterPool = sync.Pool{ }, } +// tokensPool contains tokens struct objects that can be reused +var tokensPool = sync.Pool{ + New: func() any { + return &tokens{} + }, +} + // StatelessDeflate allows compressing directly to a Writer without retaining state. // When returning everything will be flushed. // Up to 8KB of an optional dictionary can be given which is presumed to precede the block. // Longer dictionaries will be truncated and will still produce valid output. // Sending nil dictionary is perfectly fine. func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens bw := bitWriterPool.Get().(*huffmanBitWriter) bw.reset(out) defer func() { @@ -91,6 +97,12 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
var inDict []byte + dst := tokensPool.Get().(*tokens) + dst.Reset() + defer func() { + tokensPool.Put(dst) + }() + for len(in) > 0 { todo := in if len(inDict) > 0 { @@ -113,9 +125,9 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { } // Compress if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) + statelessEnc(dst, todo, int16(len(dict))) } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + statelessEnc(dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) } isEof := eof && len(in) == 0 @@ -129,7 +141,7 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // If we removed less than 1/16th, huffman compress the block. bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + bw.writeBlockDynamic(dst, isEof, uncompressed, len(in) == 0) } if len(in) > 0 { // Retain a dict if we have more diff --git a/vendor/github.com/operator-framework/api/pkg/lib/release/release.go b/vendor/github.com/operator-framework/api/pkg/lib/release/release.go new file mode 100644 index 000000000..e91adf4c3 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/lib/release/release.go @@ -0,0 +1,73 @@ +package release + +import ( + "encoding/json" + "slices" + "strings" + + semver "github.com/blang/semver/v4" +) + +// +k8s:openapi-gen=true +// OperatorRelease is a wrapper around a slice of semver.PRVersion which supports correct +// marshaling to YAML and JSON. +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:MaxLength=20 +// +kubebuilder:validation:XValidation:rule="self.matches('^[0-9A-Za-z-]+(\\\\.[0-9A-Za-z-]+)*$')",message="release version must be composed of dot-separated identifiers containing only alphanumerics and hyphens" +// +kubebuilder:validation:XValidation:rule="!self.split('.').exists(x, x.matches('^0[0-9]+$'))",message="numeric identifiers in release version must not have leading zeros" +type OperatorRelease struct { + Release []semver.PRVersion `json:"-"` +} + +// DeepCopyInto creates a deep-copy of the Version value. +func (v *OperatorRelease) DeepCopyInto(out *OperatorRelease) { + out.Release = slices.Clone(v.Release) +} + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v OperatorRelease) MarshalJSON() ([]byte, error) { + segments := []string{} + for _, segment := range v.Release { + segments = append(segments, segment.String()) + } + return json.Marshal(strings.Join(segments, ".")) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *OperatorRelease) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + segments := strings.Split(versionString, ".") + for _, segment := range segments { + release, err := semver.NewPRVersion(segment) + if err != nil { + return err + } + v.Release = append(v.Release, release) + } + + return nil +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ OperatorRelease) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// "semver" is not a standard openapi format but tooling may use the value regardless +func (_ OperatorRelease) OpenAPISchemaFormat() string { return "semver" } + +func (r OperatorRelease) String() string { + segments := []string{} + for _, segment := range r.Release { + segments = append(segments, segment.String()) + } + return strings.Join(segments, ".") +} diff --git a/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go b/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go index 397190a6a..6ed1e4ddf 100644 --- a/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go +++ b/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go @@ -39,6 +39,7 @@ func (b *bundleLoader) LoadBundle() error { errs = append(errs, b.calculateCompressedBundleSize()) b.addChannelsFromAnnotationsFile() + b.addPackageFromAnnotationsFile() if !b.foundCSV { errs = append(errs, fmt.Errorf("unable to find a csv in bundle directory %s", b.dir)) @@ -68,6 +69,14 @@ func (b *bundleLoader) addChannelsFromAnnotationsFile() { } } +func (b *bundleLoader) addPackageFromAnnotationsFile() { + if b.bundle == nil { + // None of this is relevant if the bundle was not found + return + } + b.bundle.Package = b.annotationsFile.Annotations.PackageName +} + // Compress the bundle to check its size func (b *bundleLoader) calculateCompressedBundleSize() error { if b.bundle == nil { diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go index 3e6d32480..1efb4323c 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/operator-framework/api/pkg/lib/release" "github.com/operator-framework/api/pkg/lib/version" ) @@ -274,8 +275,25 @@ type APIServiceDefinitions struct { // that can manage apps for a given version. // +k8s:openapi-gen=true type ClusterServiceVersionSpec struct { - InstallStrategy NamedInstallStrategy `json:"install"` - Version version.OperatorVersion `json:"version,omitempty"` + InstallStrategy NamedInstallStrategy `json:"install"` + Version version.OperatorVersion `json:"version,omitempty"` + // release specifies the packaging version of the operator, defaulting to empty + // release is optional + // + // A ClusterServiceVersion's release field is used to distinguish between different builds of the same operator version + // This is useful for operators that need to make changes to the CSV which don't affect their functionality, + // for example: + // - to fix a typo in their description + // - to add/amend annotations or labels + // - to amend examples or documentation + // - to produce different builds for different environments + // + // It is up to operator authors to determine the semantics of release versions they use + // for their operator. All release versions must conform to the semver prerelease format + // (dot-separated identifiers containing only alphanumerics and hyphens) and are limited + // to a maximum length of 20 characters. 
+ // +optional + Release release.OperatorRelease `json:"release,omitzero"` Maturity string `json:"maturity,omitempty"` CustomResourceDefinitions CustomResourceDefinitions `json:"customresourcedefinitions,omitempty"` APIServiceDefinitions APIServiceDefinitions `json:"apiservicedefinitions,omitempty"` @@ -595,6 +613,7 @@ type ResourceInstance struct { // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The name of the CSV" // +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`,description="The version of the CSV" +// +kubebuilder:printcolumn:name="Release",type=string,JSONPath=`.spec.release`,description="The release of this version of the CSV" // +kubebuilder:printcolumn:name="Replaces",type=string,JSONPath=`.spec.replaces`,description="The name of a CSV that this one replaces" // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go index 684a7432a..685fa26a3 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -501,6 +501,7 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec *out = *in in.InstallStrategy.DeepCopyInto(&out.InstallStrategy) in.Version.DeepCopyInto(&out.Version) + in.Release.DeepCopyInto(&out.Release) in.CustomResourceDefinitions.DeepCopyInto(&out.CustomResourceDefinitions) in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions) if in.WebhookDefinitions != nil { diff --git a/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go b/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go index 9bb90a04d..c5f7ba18d 100644 --- a/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go +++ b/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go @@ -42,9 +42,26 @@ func validateBundle(bundle *manifests.Bundle) (result errors.ManifestResult) { if sizeErrors != nil { result.Add(sizeErrors...) } + nameErrors := validateBundleName(bundle) + if nameErrors != nil { + result.Add(nameErrors...) 
+ } return result } +func validateBundleName(bundle *manifests.Bundle) []errors.Error { + var errs []errors.Error + // bundle naming with a specified release version must follow the pattern + // -v- + if len(bundle.CSV.Spec.Release.Release) > 0 { + expectedName := fmt.Sprintf("%s-v%s-%s", bundle.Package, bundle.CSV.Spec.Version.String(), bundle.CSV.Spec.Release.String()) + if bundle.Name != expectedName { + errs = append(errs, errors.ErrInvalidBundle(fmt.Sprintf("bundle name with release versioning %q does not match expected name %q", bundle.Name, expectedName), bundle.Name)) + } + } + return errs +} + func validateServiceAccounts(bundle *manifests.Bundle) []errors.Error { // get service account names defined in the csv saNamesFromCSV := make(map[string]struct{}, 0) diff --git a/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go b/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go index 197ef5de8..c5eafd454 100644 --- a/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go +++ b/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go @@ -28,7 +28,7 @@ func checkEmptyFields(result *errors.ManifestResult, v reflect.Value, parentStru // Omitted field tags will contain ",omitempty", and ignored tags will // match "-" exactly, respectively. - isOptionalField := strings.Contains(tag, ",omitempty") || tag == "-" + isOptionalField := strings.Contains(tag, ",omitempty") || strings.Contains(tag, ",omitzero") || tag == "-" emptyVal := fieldValue.IsZero() newParentStructName := fieldType.Name diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 6acf8ab1e..104dc2440 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -57,3 +57,10 @@ linters: - common-false-positives - legacy - std-error-handling + settings: + govet: + # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build). + # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17. + # This can be removed once Cobra requires Go 1.17 or higher. + disable: + - buildtag diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 78088db69..c05fed45a 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { } } -var minUsagePadding = 25 +const minUsagePadding = 25 // UsagePadding return padding for the usage. func (c *Command) UsagePadding() int { @@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int { return c.parent.commandsMaxUseLen } -var minCommandPathPadding = 11 +const minCommandPathPadding = 11 // CommandPathPadding return padding for the command path. func (c *Command) CommandPathPadding() int { @@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int { return c.parent.commandsMaxCommandPathLen } -var minNamePadding = 11 +const minNamePadding = 11 // NamePadding returns padding for the name. 
func (c *Command) NamePadding() int { @@ -1939,7 +1939,7 @@ type tmplFunc struct { fn func(io.Writer, interface{}) error } -var defaultUsageTemplate = `Usage:{{if .Runnable}} +const defaultUsageTemplate = `Usage:{{if .Runnable}} {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} @@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { return nil } -var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} +const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` @@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error { return nil } -var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} ` // defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 8409b5f8f..8236c995a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -223,7 +223,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 506ca00b6..4f47117a5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -92,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 777e68a7b..259a898ae 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 5f78bfdfb..ed2ddce71 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go index 2a0123d4b..48dbd82ae 100644 --- a/vendor/golang.org/x/mod/modfile/print.go +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -33,7 +33,7 @@ type printer struct { } // printf prints to the buffer. -func (p *printer) printf(format string, args ...interface{}) { +func (p *printer) printf(format string, args ...any) { fmt.Fprintf(p, format, args...) } diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index f58de029e..504a2f1df 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -600,7 +600,7 @@ func (in *input) readToken() { // Checked all punctuation. Must be identifier token. if c := in.peekRune(); !isIdent(c) { - in.Error(fmt.Sprintf("unexpected input character %#q", c)) + in.Error(fmt.Sprintf("unexpected input character %#q", rune(c))) } // Scan over identifier. 
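The OTLP gRPC exporter changes above swap context.WithTimeout for context.WithTimeoutCause and have the retry loop return context.Cause(ctx), so a timed-out export surfaces the descriptive cause instead of a bare deadline error. A self-contained sketch of the pattern (requires Go 1.21+ for WithTimeoutCause):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// Attach a descriptive cause to the timeout, as the exporter now does.
	cause := errors.New("exporter export timeout")
	ctx, cancel := context.WithTimeoutCause(context.Background(), 10*time.Millisecond, cause)
	defer cancel()

	<-ctx.Done()

	// ctx.Err() still reports the generic deadline error...
	fmt.Println(ctx.Err()) // context deadline exceeded
	// ...while context.Cause recovers the specific reason, which is what
	// the retry loop now propagates to its caller.
	fmt.Println(context.Cause(ctx)) // exporter export timeout
}
```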
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index a86ee4fd8..c5b8305de 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -368,7 +368,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -574,7 +574,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V Err: err, } } - errorf := func(format string, args ...interface{}) *Error { + errorf := func(format string, args ...any) *Error { return wrapError(fmt.Errorf(format, args...)) } @@ -685,7 +685,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -1594,7 +1594,7 @@ func (f *File) AddRetract(vi VersionInterval, rationale string) error { r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") } if rationale != "" { - for _, line := range strings.Split(rationale, "\n") { + for line := range strings.SplitSeq(rationale, "\n") { com := Comment{Token: "// " + line} r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) } diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 9d3955bd7..739c13f48 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -802,8 +802,8 @@ func MatchPrefixPatterns(globs, target string) bool { for globs != "" { // Extract next non-empty glob in comma-separated list. var glob string - if i := strings.Index(globs, ","); i >= 0 { - glob, globs = globs[:i], globs[i+1:] + if before, after, ok := strings.Cut(globs, ","); ok { + glob, globs = before, after } else { glob, globs = globs, "" } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 1965913e5..ccb87e6da 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -376,11 +376,24 @@ type ClientConn struct { // completely unresponsive connection. pendingResets int + // readBeforeStreamID is the smallest stream ID that has not been followed by + // a frame read from the peer. We use this to determine when a request may + // have been sent to a completely unresponsive connection: + // If the request ID is less than readBeforeStreamID, then we have had some + // indication of life on the connection since sending the request. + readBeforeStreamID uint32 + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. reqHeaderMu chan struct{} + // internalStateHook reports state changes back to the net/http.ClientConn. + // Note that this is different from the user state hook registered by + // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn, + // which calls the user hook. + internalStateHook func() + // wmu is held while writing. // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. // Only acquire both at the same time when changing peer settings. 
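The new readBeforeStreamID field in the http2 ClientConn above is easiest to see in isolation: every frame read from the peer raises the watermark to the next stream ID to be issued, so any stream whose ID sits below the watermark has had some sign of life on the connection since it was opened. A toy model of just that bookkeeping (not the real transport types):

```go
package main

import "fmt"

// conn models only the liveness bookkeeping from the http2.ClientConn diff.
type conn struct {
	nextStreamID       uint32 // next odd stream ID to hand out
	readBeforeStreamID uint32 // watermark, raised on every frame read
}

func (c *conn) openStream() uint32 {
	id := c.nextStreamID
	c.nextStreamID += 2 // client stream IDs are odd: 1, 3, 5, ...
	return id
}

// onFrameRead mirrors streamByID: any frame from the peer proves the
// connection was alive after every stream opened so far.
func (c *conn) onFrameRead() { c.readBeforeStreamID = c.nextStreamID }

// readSince reports whether any frame arrived after stream id was opened,
// matching the diff's "cc.readBeforeStreamID > cs.ID" check.
func (c *conn) readSince(id uint32) bool { return c.readBeforeStreamID > id }

func main() {
	c := &conn{nextStreamID: 1}
	a := c.openStream() // id 1
	b := c.openStream() // id 3
	c.onFrameRead()     // peer responds with some frame
	d := c.openStream() // id 5, opened after the last read

	fmt.Println(c.readSince(a), c.readSince(b), c.readSince(d)) // true true false
}
```

This is what lets cleanupWriteRequest skip the RST_STREAM+PING liveness probe when the peer has already shown activity since the request was sent.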
@@ -710,7 +723,7 @@ func canRetryError(err error) bool { func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { if t.transportTestHooks != nil { - return t.newClientConn(nil, singleUse) + return t.newClientConn(nil, singleUse, nil) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -720,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -772,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) { conf := configFromTransport(t) cc := &ClientConn{ t: t, @@ -797,6 +810,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), lastActive: time.Now(), + internalStateHook: internalStateHook, } if t.transportTestHooks != nil { t.transportTestHooks.newclientconn(cc) @@ -1037,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && - !cc.doNotReuse && - int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && - !cc.tooIdleLocked() + st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked() // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). @@ -1056,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } +func (cc *ClientConn) isUsableLocked() bool { + return cc.goAway == nil && + !cc.closed && + !cc.closing && + !cc.doNotReuse && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && + !cc.tooIdleLocked() +} + +// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn. +// +// This follows slightly different rules than clientConnIdleState.canTakeNewRequest. +// We only permit reservations up to the conn's concurrency limit. +// This differs from ClientConn.ReserveNewRequest, which permits reservations +// past the limit when StrictMaxConcurrentStreams is set. +func (cc *ClientConn) canReserveLocked() bool { + if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) { + return false + } + if !cc.isUsableLocked() { + return false + } + return true +} + // currentRequestCountLocked reports the number of concurrency slots currently in use, // including active streams, reserved slots, and reset streams waiting for acknowledgement. func (cc *ClientConn) currentRequestCountLocked() int { @@ -1067,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { return st.canTakeNewRequest } +// availableLocked reports the number of concurrency slots available. 
+func (cc *ClientConn) availableLocked() int { + if !cc.canTakeNewRequestLocked() { + return 0 + } + return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked()) +} + // tooIdleLocked reports whether this connection has been been sitting idle // for too much wall time. func (cc *ClientConn) tooIdleLocked() bool { @@ -1091,6 +1135,7 @@ func (cc *ClientConn) closeConn() { t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) defer t.Stop() cc.tconn.Close() + cc.maybeCallStateHook() } // A tls.Conn.Close can hang for a long time if the peer is unresponsive. @@ -1616,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } bodyClosed := cs.reqBodyClosed closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil + // Have we read any frames from the connection since sending this request? + readSinceStream := cc.readBeforeStreamID > cs.ID cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1647,8 +1694,10 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // // This could be due to the server becoming unresponsive. // To avoid sending too many requests on a dead connection, - // we let the request continue to consume a concurrency slot - // until we can confirm the server is still responding. + // if we haven't read any frames from the connection since + // sending this request, we let it continue to consume + // a concurrency slot until we can confirm the server is + // still responding. // We do this by sending a PING frame along with the RST_STREAM // (unless a ping is already in flight). // @@ -1659,7 +1708,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // because it's short lived and will probably be closed before // we get the ping response. ping := false - if !closeOnIdle { + if !closeOnIdle && !readSinceStream { cc.mu.Lock() // rstStreamPingsBlocked works around a gRPC behavior: // see comment on the field for details. @@ -1693,6 +1742,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } close(cs.donec) + cc.maybeCallStateHook() } // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. @@ -2745,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt // See comment on ClientConn.rstStreamPingsBlocked for details. rl.cc.rstStreamPingsBlocked = false } + rl.cc.readBeforeStreamID = rl.cc.nextStreamID cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2795,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() @@ -2975,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() // If ack, notify listener if any @@ -3198,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. +// if there's already a cached connection to the host. 
// (The field is exported so it can be accessed via reflect from net/http; tested // by TestNoDialH2RoundTripperType) +// +// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol, +// and the http1.Transport can use type assertions to call non-RoundTrip methods on it. +// This lets us expose, for example, NewClientConn to net/http. type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -3211,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err return res, err } +func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) { + tr := rt.Transport + cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook) + if err != nil { + return nil, err + } + + // RoundTrip should block when the conn is at its concurrency limit, + // not return an error. Setting strictMaxConcurrentStreams enables this. + cc.strictMaxConcurrentStreams = true + + return netHTTPClientConn{cc}, nil +} + +// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from +// the RoundTripper returned by NewClientConn. +type netHTTPClientConn struct { + cc *ClientConn +} + +func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.cc.RoundTrip(req) +} + +func (cc netHTTPClientConn) Close() error { + return cc.cc.Close() +} + +func (cc netHTTPClientConn) Err() error { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if cc.cc.closed { + return errors.New("connection closed") + } + return nil +} + +func (cc netHTTPClientConn) Reserve() error { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if !cc.cc.canReserveLocked() { + return errors.New("connection is unavailable") + } + cc.cc.streamsReserved++ + return nil +} + +func (cc netHTTPClientConn) Release() { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + // We don't complain if streamsReserved is 0. + // + // This is consistent with RoundTrip: both Release and RoundTrip will + // consume a reservation iff one exists. 
+ if cc.cc.streamsReserved > 0 { + cc.cc.streamsReserved-- + } +} + +func (cc netHTTPClientConn) Available() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.availableLocked() +} + +func (cc netHTTPClientConn) InFlight() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.currentRequestCountLocked() +} + +func (cc *ClientConn) maybeCallStateHook() { + if cc.internalStateHook != nil { + cc.internalStateHook() + } +} + func (t *Transport) idleConnTimeout() time.Duration { // to keep things backwards compatible, we use non-zero values of // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index 3aaffdd1f..c2b3c0098 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -58,8 +58,8 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { Buckets: buckets, } - data.Families = make([]string, 0, len(families)) famMu.RLock() + data.Families = make([]string, 0, len(families)) for name := range families { data.Families = append(data.Families, name) } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 2f45dbc86..f69fd7546 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -144,8 +144,8 @@ func (g *Group) SetLimit(n int) { g.sem = nil return } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + if active := len(g.sem); active != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active)) } g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 34c9ae76e..63541994e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -92,9 +92,6 @@ var ARM64 struct { HasSHA2 bool // SHA2 hardware implementation HasCRC32 bool // CRC32 hardware implementation HasATOMICS bool // Atomic memory operation instruction set - HasHPDS bool // Hierarchical permission disables in translations tables - HasLOR bool // Limited ordering regions - HasPAN bool // Privileged access never HasFPHP bool // Half precision floating-point instruction set HasASIMDHP bool // Advanced SIMD half precision instruction set HasCPUID bool // CPUID identification scheme registers diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f449c679f..af2aa99f9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -65,10 +65,10 @@ func setMinimalFeatures() { func readARM64Registers() { Initialized = true - parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0()) + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) } -func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { // ID_AA64ISAR0_EL1 switch extractBits(isar0, 4, 7) { case 1: @@ -152,22 +152,6 @@ func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { ARM64.HasI8MM = true } - // ID_AA64MMFR1_EL1 - switch extractBits(mmfr1, 12, 15) { - case 1, 2: - ARM64.HasHPDS = true - } - - switch extractBits(mmfr1, 16, 19) { - case 1: - ARM64.HasLOR = true - } - - switch extractBits(mmfr1, 20, 23) { - case 1, 2, 3: - ARM64.HasPAN = true - } - // 
ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index a4f24b3b0..3b0450a06 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -20,13 +20,6 @@ TEXT ·getisar1(SB),NOSPLIT,$0-8 MOVD R0, ret+0(FP) RET -// func getmmfr1() uint64 -TEXT ·getmmfr1(SB),NOSPLIT,$0-8 - // get Memory Model Feature Register 1 into x0 - MRS ID_AA64MMFR1_EL1, R0 - MOVD R0, ret+0(FP) - RET - // func getpfr0() uint64 TEXT ·getpfr0(SB),NOSPLIT,$0-8 // get Processor Feature Register 0 into x0 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index e3fc5a8d3..6ac6e1efb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -8,6 +8,5 @@ package cpu func getisar0() uint64 func getisar1() uint64 -func getmmfr1() uint64 func getpfr0() uint64 func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 8df2079e1..7f1946780 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -8,5 +8,4 @@ package cpu func getisar0() uint64 { return 0 } func getisar1() uint64 { return 0 } -func getmmfr1() uint64 { return 0 } func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go index 19aea0633..ebfb3fc8e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -167,7 +167,7 @@ func doinit() { setMinimalFeatures() return } - parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0) + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) Initialized = true } diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go index 87fd3a778..85b64d5cc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -59,7 +59,7 @@ func doinit() { if !ok { return } - parseARM64SystemRegisters(isar0, isar1, 0, 0) + parseARM64SystemRegisters(isar0, isar1, 0) Initialized = true } diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 42517077c..fd39be4ef 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -256,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -613,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index d0a75da57..120a7b35d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1615,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 
1c37f9fbc..97a61fc5b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6f54d34ae..a0d6d498c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 783ec5c12..dd9c903f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ca83d3ba1..384c61ca3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -120,6 +120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 607e611c0..6384c9831 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9cb5bd3c..553c1c6f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 65b078a63..b3339f209 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff 
IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5298a3033..177091d2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7bc557c87..c5abf156d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 152399bb0..f1f3fadf5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 1a1ce2409..203ad9c54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 4231a1fb5..4b9abcb21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 21c0e9526..f87983037 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f00d1cd7c..64347eb35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IEXTEN = 
0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bc8d539e6..7d7191171 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec9..50e8e6449 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go index dd99ad14d..ce28c9062 100644 --- a/vendor/golang.org/x/text/encoding/unicode/unicode.go +++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go @@ -60,9 +60,9 @@ func (utf8bomEncoding) NewDecoder() *encoding.Decoder { } var utf8enc = &internal.Encoding{ - &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, - "UTF-8", - identifier.UTF8, + Encoding: &internal.SimpleEncoding{Decoder: utf8Decoder{}, Encoder: runes.ReplaceIllFormed()}, + Name: "UTF-8", + MIB: identifier.UTF8, } type utf8bomDecoder struct { diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index 0180a341e..bc15ef8b9 100644 --- a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -27,6 +27,7 @@ package unitchecker // printf checker. import ( + "archive/zip" "encoding/gob" "encoding/json" "flag" @@ -74,6 +75,7 @@ type Config struct { VetxOnly bool // run analysis only for facts, not diagnostics VetxOutput string // where to write file of fact information Stdout string // write stdout (e.g. JSON, unified diff) to this file + FixArchive string // write fixed files to this zip archive, if non-empty SucceedOnTypecheckFailure bool // obsolete awful hack; see #18395 and below } @@ -153,7 +155,7 @@ func Run(configFile string, analyzers []*analysis.Analyzer) { // In VetxOnly mode, the analysis is run only for facts. if !cfg.VetxOnly { - code = processResults(fset, cfg.ID, results) + code = processResults(fset, cfg.ID, cfg.FixArchive, results) } os.Exit(code) @@ -177,7 +179,7 @@ func readConfig(filename string) (*Config, error) { return cfg, nil } -func processResults(fset *token.FileSet, id string, results []result) (exit int) { +func processResults(fset *token.FileSet, id, fixArchive string, results []result) (exit int) { if analysisflags.Fix { // Don't print the diagnostics, // but apply all fixes from the root actions. 
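Note: the unitchecker hunk above adds Config.FixArchive, and the hunk below wires it up — when "go fix" supplies an archive path, fixed files are written into a zip (with the unit ID as the archive comment) instead of overwriting the tree mid-build. A minimal sketch of the consumer side, using only the standard archive/zip API; the "fixes.zip" path and the dump-to-stdout behavior are illustrative assumptions, not part of the vendored code:

	// readfixes.go: list and dump the entries of a fix archive.
	package main

	import (
		"archive/zip"
		"fmt"
		"io"
		"log"
		"os"
	)

	func main() {
		r, err := zip.OpenReader("fixes.zip") // hypothetical archive path
		if err != nil {
			log.Fatal(err)
		}
		defer r.Close()
		fmt.Println("archive comment (unit ID):", r.Comment)
		for _, f := range r.File {
			rc, err := f.Open()
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println("fixed file:", f.Name)
			io.Copy(os.Stdout, rc) // emit the fixed content
			rc.Close()
		}
	}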
@@ -194,7 +196,40 @@ func processResults(fset *token.FileSet, id string, results []result) (exit int) Diagnostics: res.diagnostics, } } - if err := driverutil.ApplyFixes(fixActions, analysisflags.Diff, false); err != nil { + + // By default, fixes overwrite the original file. + // With the -diff flag, print the diffs to stdout. + // If "go fix" provides a fix archive, we write files + // into it so that mutations happen after the build. + write := func(filename string, content []byte) error { + return os.WriteFile(filename, content, 0644) + } + if fixArchive != "" { + f, err := os.Create(fixArchive) + if err != nil { + log.Fatalf("can't create -fix archive: %v", err) + } + zw := zip.NewWriter(f) + zw.SetComment(id) // ignore error + defer func() { + if err := zw.Close(); err != nil { + log.Fatalf("closing -fix archive zip writer: %v", err) + } + if err := f.Close(); err != nil { + log.Fatalf("closing -fix archive file: %v", err) + } + }() + write = func(filename string, content []byte) error { + f, err := zw.Create(filename) + if err != nil { + return err + } + _, err = f.Write(content) + return err + } + } + + if err := driverutil.ApplyFixes(fixActions, write, analysisflags.Diff, false); err != nil { // Fail when applying fixes failed. log.Print(err) exit = 1 diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 5bacc0fa4..adb471101 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -9,6 +9,7 @@ import ( "fmt" "go/ast" "go/token" + "reflect" "slices" "strconv" "strings" @@ -149,7 +150,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added if newImport.Name != nil { newImport.Name.NamePos = pos } - newImport.Path.ValuePos = pos + updateBasicLitPos(newImport.Path, pos) newImport.EndPos = pos // Clean up parens. impDecl contains at least one spec. @@ -184,7 +185,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added first.Lparen = first.Pos() // Move the imports of the other import declaration to the first one. for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + updateBasicLitPos(spec.(*ast.ImportSpec).Path, first.Pos()) first.Specs = append(first.Specs, spec) } f.Decls = slices.Delete(f.Decls, i, i+1) @@ -470,3 +471,17 @@ func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { return groups } + +// updateBasicLitPos updates lit.Pos, +// ensuring that lit.End (if set) is displaced by the same amount. +// (See https://go.dev/issue/76395.) +func updateBasicLitPos(lit *ast.BasicLit, pos token.Pos) { + len := lit.End() - lit.Pos() + lit.ValuePos = pos + // TODO(adonovan): after go1.26, simplify to: + // lit.ValueEnd = pos + len + v := reflect.ValueOf(lit).Elem().FieldByName("ValueEnd") + if v.IsValid() && v.Int() != 0 { + v.SetInt(int64(pos + len)) + } +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 060ab08ef..ff607389d 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -1027,11 +1027,15 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { - // Fill in the blanks to avoid surprises. 
+ // To avoid surprises, fill in the blanks consistent + // with other packages. (For example, some analyzers + // assert that each needed types.Info map is non-nil + // even when there is no syntax that would cause them + // to consult the map.) lpkg.Types = types.Unsafe lpkg.Fset = ld.Fset lpkg.Syntax = []*ast.File{} - lpkg.TypesInfo = new(types.Info) + lpkg.TypesInfo = ld.newTypesInfo() lpkg.TypesSizes = ld.sizes return } @@ -1180,20 +1184,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - // Populate TypesInfo only if needed, as it - // causes the type checker to work much harder. - if ld.Config.Mode&NeedTypesInfo != 0 { - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - FileVersions: make(map[*ast.File]string), - } - } + lpkg.TypesInfo = ld.newTypesInfo() lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1307,6 +1298,24 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.IllTyped = illTyped } +func (ld *loader) newTypesInfo() *types.Info { + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo == 0 { + return nil + } + return &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } +} + // An importFunc is an implementation of the single-method // types.Importer interface based on a function value. type importerFunc func(path string) (*types.Package, error) diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 5f10f56cb..3d24a8c63 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -12,6 +12,7 @@ import ( // Callee returns the named target of a function call, if any: // a function, method, builtin, or variable. +// It returns nil for a T(x) conversion. // // Functions and methods may potentially have type parameters. // diff --git a/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go b/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go index 763650c74..7769b39be 100644 --- a/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go +++ b/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go @@ -93,11 +93,13 @@ type FixAction struct { // // If printDiff (from the -diff flag) is set, instead of updating the // files it displays the final patch composed of all the cleanly merged -// fixes. +// fixes. (It is tempting to factor printDiff as just a variant of +// writeFile that is provided the old and new content, but it's hard +// to generate a good summary that way.) // // TODO(adonovan): handle file-system level aliases such as symbolic // links using robustio.FileID.
-func ApplyFixes(actions []FixAction, printDiff, verbose bool) error { +func ApplyFixes(actions []FixAction, writeFile func(filename string, content []byte) error, printDiff, verbose bool) error { generated := make(map[*token.File]bool) // Select fixes to apply. @@ -160,7 +162,9 @@ func ApplyFixes(actions []FixAction, printDiff, verbose bool) error { var ( accumulatedEdits = make(map[string][]diff.Edit) filePkgs = make(map[string]*types.Package) // maps each file to an arbitrary package that includes it - goodFixes = 0 + + goodFixes = 0 // number of fixes cleanly applied + skippedFixes = 0 // number of fixes skipped (e.g. because they edit a generated file) ) fixloop: for _, fixact := range fixes { @@ -168,6 +172,7 @@ fixloop: for _, edit := range fixact.fix.TextEdits { file := fixact.act.FileSet.File(edit.Pos) if generated[file] { + skippedFixes++ continue fixloop } } @@ -227,7 +232,7 @@ fixloop: log.Printf("%s: fix %s applied", fixact.act.Name, fixact.fix.Message) } } - badFixes := len(fixes) - goodFixes + badFixes := len(fixes) - goodFixes - skippedFixes // number of fixes that could not be applied // Show diff or update files to final state. var files []string @@ -261,12 +266,11 @@ fixloop: os.Stdout.WriteString(unified) } else { - // write + // write file totalFiles++ - // TODO(adonovan): abstract the I/O. - if err := os.WriteFile(file, final, 0644); err != nil { + if err := writeFile(file, final); err != nil { log.Println(err) - continue + continue // (causes ApplyFixes to return an error) } filesUpdated++ } @@ -305,15 +309,25 @@ fixloop: // TODO(adonovan): should we log that n files were updated in case of total victory? if badFixes > 0 || filesUpdated < totalFiles { if printDiff { - return fmt.Errorf("%d of %d fixes skipped (e.g. due to conflicts)", badFixes, len(fixes)) + return fmt.Errorf("%d of %s skipped (e.g. due to conflicts)", + badFixes, + plural(len(fixes), "fix", "fixes")) } else { - return fmt.Errorf("applied %d of %d fixes; %d files updated. (Re-run the command to apply more.)", - goodFixes, len(fixes), filesUpdated) + return fmt.Errorf("applied %d of %s; %s updated. (Re-run the command to apply more.)", + goodFixes, + plural(len(fixes), "fix", "fixes"), + plural(filesUpdated, "file", "files")) } } if verbose { - log.Printf("applied %d fixes, updated %d files", len(fixes), filesUpdated) + if skippedFixes > 0 { + log.Printf("skipped %s that would edit generated files", + plural(skippedFixes, "fix", "fixes")) + } + log.Printf("applied %s, updated %s", + plural(len(fixes), "fix", "fixes"), + plural(filesUpdated, "file", "files")) } return nil @@ -326,6 +340,9 @@ fixloop: // information for the fixed file and thus cannot accurately tell // whether k is among the free names of T{k: 0}, which requires // knowledge of whether T is a struct type. +// +// Like [imports.Process] (the core of x/tools/cmd/goimports), it also +// merges import decls. func FormatSourceRemoveImports(pkg *types.Package, src []byte) ([]byte, error) { // This function was reduced from the "strict entire file" // path through [format.Source]. @@ -340,6 +357,10 @@ func FormatSourceRemoveImports(pkg *types.Package, src []byte) ([]byte, error) { removeUnneededImports(fset, pkg, file) + // TODO(adonovan): to generate cleaner edits when adding an import, + // consider adding a call to imports.mergeImports; however, it does + // cause comments to migrate. + // printerNormalizeNumbers means to canonicalize number literal prefixes // and exponents while printing. See https://golang.org/doc/go1.13#gofmt.
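Note: the signature change above turns the write step of ApplyFixes into an injected callback, so the same fix-application logic can target disk, the -fix zip archive, or memory. A self-contained sketch of that callback-injection pattern; applyAll and the sinks below are illustrative stand-ins, not the vendored API:

	package main

	import (
		"fmt"
		"os"
	)

	// applyAll mimics the ApplyFixes contract: the caller supplies
	// writeFile, and persistence is delegated to it per fixed file.
	func applyAll(fixed map[string][]byte, writeFile func(filename string, content []byte) error) error {
		for name, content := range fixed {
			if err := writeFile(name, content); err != nil {
				return err
			}
		}
		return nil
	}

	func main() {
		fixed := map[string][]byte{"a.go": []byte("package a\n")}

		// Default sink: overwrite files in place, as the vet driver does.
		toDisk := func(name string, content []byte) error {
			return os.WriteFile(name, content, 0644)
		}
		_ = toDisk // swap in for toMemory to write to disk instead

		// Alternative sink: capture in memory (tests, fix archives, ...).
		captured := map[string][]byte{}
		toMemory := func(name string, content []byte) error {
			captured[name] = content
			return nil
		}
		if err := applyAll(fixed, toMemory); err != nil {
			fmt.Println(err)
		}
		fmt.Printf("captured %d file(s)\n", len(captured))
	}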
// @@ -425,3 +446,12 @@ func removeUnneededImports(fset *token.FileSet, pkg *types.Package, file *ast.Fi del() } } + +// plural returns "n nouns", selecting the plural form as appropriate. +func plural(n int, singular, plural string) string { + if n == 1 { + return "1 " + singular + } else { + return fmt.Sprintf("%d %s", n, plural) + } +} diff --git a/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go b/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go index 7fc42a5ef..545884685 100644 --- a/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go +++ b/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go @@ -7,6 +7,7 @@ package driverutil // This file defines output helpers common to all drivers. import ( + "cmp" "encoding/json" "fmt" "go/token" @@ -76,11 +77,10 @@ type JSONSuggestedFix struct { } // A JSONDiagnostic describes the JSON schema of an analysis.Diagnostic. -// -// TODO(matloob): include End position if present. type JSONDiagnostic struct { Category string `json:"category,omitempty"` Posn string `json:"posn"` // e.g. "file.go:line:column" + End string `json:"end"` // (ditto) Message string `json:"message"` SuggestedFixes []JSONSuggestedFix `json:"suggested_fixes,omitempty"` Related []JSONRelatedInformation `json:"related,omitempty"` @@ -88,10 +88,9 @@ type JSONDiagnostic struct { // A JSONRelated describes a secondary position and message related to // a primary diagnostic. -// -// TODO(adonovan): include End position if present. type JSONRelatedInformation struct { Posn string `json:"posn"` // e.g. "file.go:line:column" + End string `json:"end"` // (ditto) Message string `json:"message"` } @@ -127,12 +126,14 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis. for _, r := range f.Related { related = append(related, JSONRelatedInformation{ Posn: fset.Position(r.Pos).String(), + End: fset.Position(cmp.Or(r.End, r.Pos)).String(), Message: r.Message, }) } jdiag := JSONDiagnostic{ Category: f.Category, Posn: fset.Position(f.Pos).String(), + End: fset.Position(cmp.Or(f.End, f.Pos)).String(), Message: f.Message, SuggestedFixes: fixes, Related: related, diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/vendor/golang.org/x/tools/internal/diff/lcs/old.go index 7b7c5cc67..5acc68e1d 100644 --- a/vendor/golang.org/x/tools/internal/diff/lcs/old.go +++ b/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -16,10 +16,6 @@ type Diff struct { ReplStart, ReplEnd int // offset of replacement text in B } -// DiffStrings returns the differences between two strings. -// It does not respect rune boundaries. -func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } - // DiffBytes returns the differences between two byte sequences. // It does not respect rune boundaries. func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go index 05f3a9a57..16ae6bb02 100644 --- a/vendor/golang.org/x/tools/internal/event/core/export.go +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -8,7 +8,6 @@ import ( "context" "sync/atomic" "time" - "unsafe" "golang.org/x/tools/internal/event/label" ) @@ -17,23 +16,21 @@ import ( // It may return a modified context and event.
type Exporter func(context.Context, Event, label.Map) context.Context -var ( - exporter unsafe.Pointer -) +var exporter atomic.Pointer[Exporter] // SetExporter sets the global exporter function that handles all events. // The exporter is called synchronously from the event call site, so it should // return quickly so as not to hold up user code. func SetExporter(e Exporter) { - p := unsafe.Pointer(&e) if e == nil { // &e is always valid, and so p is always valid, but for the early abort // of ProcessEvent to be efficient it needs to make the nil check on the // pointer without having to dereference it, so we make the nil function // also a nil pointer - p = nil + exporter.Store(nil) + } else { + exporter.Store(&e) } - atomic.StorePointer(&exporter, p) } // deliver is called to deliver an event to the supplied exporter. @@ -48,7 +45,7 @@ func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { // Export is called to deliver an event to the global exporter if set. func Export(ctx context.Context, ev Event) context.Context { // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + exporterPtr := exporter.Load() if exporterPtr == nil { return ctx } @@ -61,7 +58,7 @@ func Export(ctx context.Context, ev Event) context.Context { // It will fill in the time. func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + exporterPtr := exporter.Load() if exporterPtr == nil { return ctx, func() {} } diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 92a391057..c37584af9 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -7,7 +7,6 @@ package label import ( "fmt" "io" - "reflect" "slices" "unsafe" ) @@ -103,11 +102,10 @@ type stringptr unsafe.Pointer // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. func OfString(k Key, v string) Label { - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) return Label{ key: k, - packed: uint64(hdr.Len), - untyped: stringptr(hdr.Data), + packed: uint64(len(v)), + untyped: stringptr(unsafe.StringData(v)), } } @@ -116,11 +114,7 @@ func OfString(k Key, v string) Label { // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. func (t Label) UnpackString() string { - var v string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) - hdr.Data = uintptr(t.untyped.(stringptr)) - hdr.Len = int(t.packed) - return v + return unsafe.String((*byte)(t.untyped.(stringptr)), int(t.packed)) } // Valid returns true if the Label is a valid one (it has a key). 
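Note: the label.go hunk above retires the reflect.StringHeader trick in favor of the Go 1.20 unsafe.String/unsafe.StringData pair, which expresses the same pack/unpack of a string's pointer and length without violating the unsafe.Pointer rules. A standalone round-trip sketch of the pattern (variable names here are illustrative):

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		s := "hello"

		// Pack: record the data pointer and length separately,
		// as label.OfString now does via unsafe.StringData.
		data := unsafe.StringData(s)
		n := len(s)

		// Unpack: rebuild the string without reflect.StringHeader,
		// as Label.UnpackString now does via unsafe.String.
		back := unsafe.String(data, n)
		fmt.Println(back == s) // true; no copy, no allocation
	}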
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index 581784da4..f7b9c1286 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,360 +12,364 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03n\x84\x01D\x14"}, - {"bytes", "q*Z\x03\fG\x02\x02"}, + {"archive/tar", "\x03p\x03F=\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, + {"archive/zip", "\x02\x04f\a\x03\x13\x021=\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, + {"bufio", "\x03p\x86\x01D\x14"}, + {"bytes", "s+[\x03\fG\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xf1\x01A"}, - {"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04d\a\x03\x14mT"}, - {"compress/lzw", "\x02o\x03\x81\x01"}, - {"compress/zlib", "\x02\x04d\a\x03\x12\x01n"}, - {"container/heap", "\xb7\x02"}, + {"compress/bzip2", "\x02\x02\xf5\x01A"}, + {"compress/flate", "\x02q\x03\x83\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04f\a\x03\x15nT"}, + {"compress/lzw", "\x02q\x03\x83\x01"}, + {"compress/zlib", "\x02\x04f\a\x03\x13\x01o"}, + {"container/heap", "\xbb\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "q[o\x01\r"}, - {"crypto", "\x86\x01oC"}, - {"crypto/aes", "\x10\n\t\x95\x02"}, - {"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"}, - {"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"}, - {"crypto/dsa", "D\x04)\x84\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"}, - {"crypto/elliptic", "2?\x84\x01\r9"}, + {"context", "s\\p\x01\r"}, + {"crypto", "\x89\x01pC"}, + {"crypto/aes", "\x10\n\t\x99\x02"}, + {"crypto/cipher", "\x03 \x01\x01 \x12\x1c,Z"}, + {"crypto/des", "\x10\x15 .,\x9d\x01\x03"}, + {"crypto/dsa", "E\x04*\x86\x01\r"}, + {"crypto/ecdh", "\x03\v\f\x10\x04\x17\x04\x0e\x1c\x86\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\b\v\x06\x01\x04\r\x01\x1c\x86\x01\r\x05K\x01"}, + {"crypto/ed25519", "\x0e\x1e\x12\a\v\a\x1c\x86\x01C"}, + {"crypto/elliptic", "3@\x86\x01\r9"}, {"crypto/fips140", "\"\x05"}, - {"crypto/hkdf", "/\x14\x01-\x15"}, - {"crypto/hmac", "\x1a\x16\x13\x01\x111"}, - {"crypto/internal/boring", "\x0e\x02\ri"}, - {"crypto/internal/boring/bbig", "\x1a\xe8\x01M"}, - {"crypto/internal/boring/bcache", "\xbc\x02\x13"}, + {"crypto/hkdf", "/\x15\x01.\x16"}, + {"crypto/hmac", "\x1a\x16\x14\x01\x122"}, + {"crypto/internal/boring", "\x0e\x02\rl"}, + {"crypto/internal/boring/bbig", "\x1a\xec\x01M"}, + {"crypto/internal/boring/bcache", "\xc0\x02\x13"}, {"crypto/internal/boring/sig", ""}, {"crypto/internal/constanttime", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "I"}, - {"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"}, - {"crypto/internal/fips140", "A0\xbd\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"}, - {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"}, - {"crypto/internal/fips140/alias", "\xcf\x02"}, - {"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"}, - {"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"}, - 
{"crypto/internal/fips140/check/checktest", "'\x87\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"}, - {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"}, - {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"}, - {"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"}, - {"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"}, - {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"}, - {"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"}, - {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"}, - {"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"}, - {"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"}, - {"crypto/internal/fips140/ssh", "'_"}, - {"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"}, - {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"}, - {"crypto/internal/fips140cache", "\xae\x02\r&"}, + {"crypto/internal/cryptotest", "\x03\r\n\b&\x0f\x19\x06\x13\x12 \x04\x06\t\x19\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, + {"crypto/internal/entropy", "J"}, + {"crypto/internal/entropy/v1.0.0", "C0\x95\x018\x13"}, + {"crypto/internal/fips140", "B1\xbf\x01\v\x16"}, + {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x14\x05\x01\x01\x06+\x95\x014"}, + {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x12\x05\x01\a+\x92\x01"}, + {"crypto/internal/fips140/alias", "\xd3\x02"}, + {"crypto/internal/fips140/bigmod", "'\x19\x01\a+\x95\x01"}, + {"crypto/internal/fips140/check", "\"\x0e\a\t\x02\xb7\x01Z"}, + {"crypto/internal/fips140/check/checktest", "'\x8b\x02!"}, + {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x14\x05\t\x01)\x86\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\n\r3\x86\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x03\x06:\x16pF"}, + {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\f:\xc9\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x123\x95\x017"}, + {"crypto/internal/fips140/edwards25519/field", "'\x14\x053\x95\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\a<\x16"}, + {"crypto/internal/fips140/hmac", "\x03\x1f\x15\x01\x01:\x16"}, + {"crypto/internal/fips140/mldsa", "\x03\x1b\x04\x05\x02\x0e\x01\x03\x053\x95\x017"}, + {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0f\x03\x053\xcc\x01"}, + {"crypto/internal/fips140/nistec", "\x1e\t\r\f3\x95\x01*\r\x14"}, + {"crypto/internal/fips140/nistec/fiat", "'\x148\x95\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\a<\x16"}, + {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\x0e\x01\x01\x028\x16pF"}, + {"crypto/internal/fips140/sha256", "\x03\x1f\x1e\x01\a+\x16\x7f"}, + {"crypto/internal/fips140/sha3", "\x03\x1f\x19\x05\x012\x95\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1f\x1e\x01\a+\x16\x7f"}, + {"crypto/internal/fips140/ssh", "'b"}, + {"crypto/internal/fips140/subtle", "\x1e\a\x1b\xc8\x01"}, + {"crypto/internal/fips140/tls12", 
"\x03\x1f\x05\t\a\x02:\x16"}, + {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\b\t3\x16"}, + {"crypto/internal/fips140cache", "\xb2\x02\r&"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x9c\x01"}, - {"crypto/internal/fips140deps/cpu", "\xb1\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb9\x01"}, - {"crypto/internal/fips140deps/time", "\xc9\x02"}, - {"crypto/internal/fips140hash", "7\x1c3\xc9\x01"}, - {"crypto/internal/fips140only", ")\r\x01\x01N3<"}, + {"crypto/internal/fips140deps/byteorder", "\x9f\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb4\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xbc\x01"}, + {"crypto/internal/fips140deps/time", "\xcd\x02"}, + {"crypto/internal/fips140hash", "8\x1d4\xca\x01"}, + {"crypto/internal/fips140only", ")\x0e\x01\x01P3="}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"}, - {"crypto/internal/impl", "\xb9\x02"}, - {"crypto/internal/randutil", "\xf5\x01\x12"}, - {"crypto/internal/sysrand", "qo! \r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "q"}, - {"crypto/md5", "\x0e6-\x15\x16h"}, - {"crypto/mlkem", "1"}, - {"crypto/pbkdf2", "4\x0f\x01-\x15"}, - {"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"}, - {"crypto/rc4", "%\x1f-\xc7\x01"}, - {"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"}, - {"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"}, - {"crypto/sha256", "\x0e\f\x1cP"}, - {"crypto/sha3", "\x0e)O\xc9\x01"}, - {"crypto/sha512", "\x0e\f\x1eN"}, - {"crypto/subtle", "\x1e\x1c\x9c\x01X"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa5\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "g\x06\a\x8e\x01G"}, - {"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"}, - {"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"}, - {"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"}, - {"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"}, - {"debug/plan9obj", "j\a\x03e\x1c,"}, - {"embed", "q*A\x19\x01S"}, + {"crypto/internal/hpke", "\x03\v\x01\x01\x03\x055\x03\x04\x01\x01\x16\a\x03\x13\xcc\x01"}, + {"crypto/internal/impl", "\xbd\x02"}, + {"crypto/internal/randutil", "\xf9\x01\x12"}, + {"crypto/internal/sysrand", "sq! 
\r\r\x01\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "s"}, + {"crypto/md5", "\x0e7.\x16\x16i"}, + {"crypto/mlkem", "\x0e$"}, + {"crypto/mlkem/mlkemtest", "2\x1b&"}, + {"crypto/pbkdf2", "5\x0f\x01.\x16"}, + {"crypto/rand", "\x1a\b\a\x1c\x04\x01)\x86\x01\rM"}, + {"crypto/rc4", "% .\xc9\x01"}, + {"crypto/rsa", "\x0e\f\x01\v\x10\x0e\x01\x04\a\a\x1c\x03\x133=\f\x01"}, + {"crypto/sha1", "\x0e\f+\x03+\x16\x16\x15T"}, + {"crypto/sha256", "\x0e\f\x1dR"}, + {"crypto/sha3", "\x0e*Q\xca\x01"}, + {"crypto/sha512", "\x0e\f\x1fP"}, + {"crypto/subtle", "\x1e\x1d\x9f\x01X"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\x0e\n\x01\n\x05\x04\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x15\b=\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xa9\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x016\x06\x01\x01\x02\x05\x0e\x06\x02\x02\x03F\x03:\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\a\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "i\x06\a\x90\x01G"}, + {"database/sql", "\x03\nP\x16\x03\x83\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, + {"database/sql/driver", "\rf\x03\xb7\x01\x0f\x11"}, + {"debug/buildinfo", "\x03]\x02\x01\x01\b\a\x03g\x1a\x02\x01+\x0f\x1f"}, + {"debug/dwarf", "\x03i\a\x03\x83\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06V\r\a\x03g\x1b\x01\f \x17\x01\x16"}, + {"debug/gosym", "\x03i\n\xc5\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06V\r\ng\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06V\r\a\x03g\x1c,\x17\x01\x16"}, + {"debug/plan9obj", "l\a\x03g\x1c,"}, + {"embed", "s+B\x19\x01S"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf5\x01C"}, - {"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"}, - {"encoding/base32", "\xf5\x01A\x02"}, - {"encoding/base64", "\x9c\x01YA\x02"}, - {"encoding/binary", "q\x84\x01\f(\r\x05"}, - {"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"}, - {"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "q\x03\x81\x01A\x03"}, - {"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03f\b\x84\x01A\x03"}, - {"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xcc\x01\x83\x01"}, - {"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "qE&\x19\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"}, - {"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "q\xc7\x01\x01\x12\x02"}, - {"go/constant", "t\x0f~\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03q\x01\v\x01\x02rD"}, - {"go/importer", "v\a\x01\x01\x04\x01q9"}, - {"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"}, - {"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"}, - {"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"}, - {"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xbe\x01{"}, - 
{"hash", "\xf5\x01"}, - {"hash/adler32", "q\x15\x16"}, - {"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"}, - {"hash/crc64", "q\x15\x16\x9f\x01"}, - {"hash/fnv", "q\x15\x16h"}, - {"hash/maphash", "\x86\x01\x11<|"}, - {"html", "\xb9\x02\x02\x12"}, - {"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", "\x02o\x1ef\x0f4\x03\x01"}, + {"encoding/ascii85", "\xf9\x01C"}, + {"encoding/asn1", "\x03p\x03g(\x01'\r\x02\x01\x10\x03\x01"}, + {"encoding/base32", "\xf9\x01A\x02"}, + {"encoding/base64", "\x9f\x01ZA\x02"}, + {"encoding/binary", "s\x86\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01p\x03\x83\x01D\x12\x02"}, + {"encoding/gob", "\x02e\x05\a\x03g\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, + {"encoding/hex", "s\x03\x83\x01A\x03"}, + {"encoding/json", "\x03\x01c\x04\b\x03\x83\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, + {"encoding/pem", "\x03h\b\x86\x01A\x03"}, + {"encoding/xml", "\x02\x01d\f\x03\x83\x014\x05\n\x01\x02\x10\x02"}, + {"errors", "\xcf\x01\x84\x01"}, + {"expvar", "pLA\b\v\x15\r\b\x02\x03\x01\x11"}, + {"flag", "g\f\x03\x83\x01,\b\x05\b\x02\x01\x10"}, + {"fmt", "sF'\x19\f \b\r\x02\x03\x12"}, + {"go/ast", "\x03\x01r\x0f\x01s\x03)\b\r\x02\x01\x12\x02"}, + {"go/build", "\x02\x01p\x03\x01\x02\x02\b\x02\x01\x17\x1f\x04\x02\b\x1c\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, + {"go/build/constraint", "s\xc9\x01\x01\x12\x02"}, + {"go/constant", "v\x10\x7f\x01\x024\x01\x02\x12"}, + {"go/doc", "\x04r\x01\x05\n=61\x10\x02\x01\x12\x02"}, + {"go/doc/comment", "\x03s\xc4\x01\x01\x01\x01\x12\x02"}, + {"go/format", "\x03s\x01\f\x01\x02sD"}, + {"go/importer", "x\a\x01\x02\x04\x01r9"}, + {"go/internal/gccgoimporter", "\x02\x01]\x13\x03\x04\f\x01p\x02,\x01\x05\x11\x01\f\b"}, + {"go/internal/gcimporter", "\x02t\x10\x010\x05\r0,\x15\x03\x02"}, + {"go/internal/scannerhooks", "\x86\x01"}, + {"go/internal/srcimporter", "v\x01\x01\v\x03\x01r,\x01\x05\x12\x02\x14"}, + {"go/parser", "\x03p\x03\x01\x02\b\x04\x01s\x01+\x06\x12"}, + {"go/printer", "v\x01\x02\x03\ns\f \x15\x02\x01\x02\v\x05\x02"}, + {"go/scanner", "\x03s\v\x05s2\x10\x01\x13\x02"}, + {"go/token", "\x04r\x86\x01>\x02\x03\x01\x0f\x02"}, + {"go/types", "\x03\x01\x06i\x03\x01\x03\t\x03\x024\x063\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, + {"go/version", "\xc1\x01|"}, + {"hash", "\xf9\x01"}, + {"hash/adler32", "s\x16\x16"}, + {"hash/crc32", "s\x16\x16\x15\x8b\x01\x01\x13"}, + {"hash/crc64", "s\x16\x16\xa0\x01"}, + {"hash/fnv", "s\x16\x16i"}, + {"hash/maphash", "\x89\x01\x11<}"}, + {"html", "\xbd\x02\x02\x12"}, + {"html/template", "\x03m\x06\x19-=\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, + {"image", "\x02q\x1fg\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x8f\x01"}, - {"image/draw", "\x8e\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"}, - {"image/internal/imageutil", "\x8e\x01"}, - {"image/jpeg", "\x02o\x1d\x01\x04b"}, - {"image/png", "\x02\aa\n\x12\x02\x06\x01fC"}, - {"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"}, - {"internal/abi", "\xb8\x01\x97\x01"}, - {"internal/asan", "\xcf\x02"}, - {"internal/bisect", "\xae\x02\r\x01"}, - {"internal/buildcfg", "tGf\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xb1\x01\x9e\x01"}, + {"image/color/palette", "\x92\x01"}, + {"image/draw", "\x91\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05k\x03\x1b\x01\x01\x01\vZ\x0f"}, + {"image/internal/imageutil", "\x91\x01"}, + {"image/jpeg", "\x02q\x1e\x01\x04c"}, + {"image/png", "\x02\ac\n\x13\x02\x06\x01gC"}, + {"index/suffixarray", "\x03i\a\x86\x01\f+\n\x01"}, + {"internal/abi", 
"\xbb\x01\x98\x01"}, + {"internal/asan", "\xd3\x02"}, + {"internal/bisect", "\xb2\x02\r\x01"}, + {"internal/buildcfg", "vHg\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb4\x01\x9f\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"}, + {"internal/cgrouptest", "v[T\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\x9f\x01\x15\a\x98\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "t-`"}, - {"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"}, - {"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"}, - {"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."}, - {"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xcf\x02"}, - {"internal/coverage/slicereader", "j\n\x81\x01Z"}, - {"internal/coverage/slicewriter", "t\x81\x01"}, - {"internal/coverage/stringtab", "t8\x04E"}, + {"internal/coverage/cfile", "p\x06\x17\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02',\x06\a\n\x01\x03\r\x06"}, + {"internal/coverage/cformat", "\x04r.\x04Q\v6\x01\x02\r"}, + {"internal/coverage/cmerge", "v.a"}, + {"internal/coverage/decodecounter", "l\n.\v\x02H,\x17\x17"}, + {"internal/coverage/decodemeta", "\x02j\n\x17\x17\v\x02H,"}, + {"internal/coverage/encodecounter", "\x02j\n.\f\x01\x02F\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01i\n\x13\x04\x17\r\x02F,."}, + {"internal/coverage/pods", "\x04r.\x81\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xd3\x02"}, + {"internal/coverage/slicereader", "l\n\x83\x01Z"}, + {"internal/coverage/slicewriter", "v\x83\x01"}, + {"internal/coverage/stringtab", "v9\x04F"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xcf\x02"}, - {"internal/dag", "\x04p\xc2\x01\x03"}, - {"internal/diff", "\x03q\xc3\x01\x02"}, - {"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "q*A\x1a@"}, - {"internal/fmtsort", "\x04\xa5\x02\r"}, - {"internal/fuzz", "\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xd3\x02"}, + {"internal/dag", "\x04r\xc4\x01\x03"}, + {"internal/diff", "\x03s\xc5\x01\x02"}, + {"internal/exportdata", "\x02\x01p\x03\x02e\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "s+B\x1a@"}, + {"internal/fmtsort", "\x04\xa9\x02\r"}, + {"internal/fuzz", "\x03\nG\x18\x04\x03\x03\x01\f\x036=\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x99\x01!\x81\x01\x01\x13"}, + {"internal/godebug", "\x9c\x01!\x82\x01\x01\x13"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\xa1\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa5\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/lazyregexp", "\xa1\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf5\x01,\x18\x02\f"}, - {"internal/msan", "\xcf\x02"}, + {"internal/lazyregexp", "\xa5\x02\v\r\x02"}, + {"internal/lazytemplate", "\xf9\x01,\x18\x02\f"}, + {"internal/msan", "\xd3\x02"}, {"internal/nettrace", ""}, - 
{"internal/obscuretestdata", "i\x8c\x01,"}, - {"internal/oserror", "q"}, - {"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "k\x8e\x01,"}, + {"internal/oserror", "s"}, + {"internal/pkgbits", "\x03Q\x18\a\x03\x04\fs\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"}, + {"internal/poll", "sl\x05\x159\r\x01\x01\f\x06"}, + {"internal/profile", "\x03\x04l\x03\x83\x017\n\x01\x01\x01\x10"}, {"internal/profilerecord", ""}, - {"internal/race", "\x97\x01\xb8\x01"}, - {"internal/reflectlite", "\x97\x01!:\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"}, - {"weak", "\x97\x01\x97\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xd3\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "W\x15\x9c\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "s\xc7\x01"}, + {"vendor/golang.org/x/net/http/httpguts", "\x8f\x02\x14\x1a\x14\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "s\x03\x99\x01\x10\x05\x01\x18\x14\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03p\x03\x83\x01F"}, + {"vendor/golang.org/x/net/idna", "v\x8f\x018\x14\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03i\a\x03\x83\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, + {"vendor/golang.org/x/sys/cpu", "\xa5\x02\r\n\x01\x16"}, + {"vendor/golang.org/x/text/secure/bidirule", "s\xde\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03p\x86\x01X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bk\x87\x01>\x16"}, + {"vendor/golang.org/x/text/unicode/norm", "l\n\x83\x01F\x12\x11"}, + {"weak", "\x9a\x01\x98\x01!"}, } // bootstrap is the list of bootstrap packages extracted from cmd/dist. @@ -385,6 +389,7 @@ var bootstrap = map[string]bool{ "cmd/compile/internal/arm64": true, "cmd/compile/internal/base": true, "cmd/compile/internal/bitvec": true, + "cmd/compile/internal/bloop": true, "cmd/compile/internal/compare": true, "cmd/compile/internal/coverage": true, "cmd/compile/internal/deadlocals": true, @@ -413,6 +418,7 @@ var bootstrap = map[string]bool{ "cmd/compile/internal/riscv64": true, "cmd/compile/internal/rttype": true, "cmd/compile/internal/s390x": true, + "cmd/compile/internal/slice": true, "cmd/compile/internal/ssa": true, "cmd/compile/internal/ssagen": true, "cmd/compile/internal/staticdata": true, diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 362f23c43..f1e24625a 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -16,6 +16,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Flush", Method, 0, ""}, {"(*Writer).Write", Method, 0, ""}, {"(*Writer).WriteHeader", Method, 0, ""}, + {"(FileInfoNames).Gname", Method, 23, ""}, + {"(FileInfoNames).IsDir", Method, 23, ""}, + {"(FileInfoNames).ModTime", Method, 23, ""}, + {"(FileInfoNames).Mode", Method, 23, ""}, + {"(FileInfoNames).Name", Method, 23, ""}, + {"(FileInfoNames).Size", Method, 23, ""}, + {"(FileInfoNames).Sys", Method, 23, ""}, + {"(FileInfoNames).Uname", Method, 23, ""}, {"(Format).String", Method, 10, ""}, {"ErrFieldTooLong", Var, 0, ""}, {"ErrHeader", Var, 0, ""}, @@ -338,6 +346,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Write", Method, 0, ""}, {"(CorruptInputError).Error", Method, 0, ""}, {"(InternalError).Error", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + 
{"(Reader).ReadByte", Method, 0, ""}, + {"(Resetter).Reset", Method, 4, ""}, {"BestCompression", Const, 0, ""}, {"BestSpeed", Const, 0, ""}, {"CorruptInputError", Type, 0, ""}, @@ -409,6 +420,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Flush", Method, 0, ""}, {"(*Writer).Reset", Method, 2, ""}, {"(*Writer).Write", Method, 0, ""}, + {"(Resetter).Reset", Method, 4, ""}, {"BestCompression", Const, 0, ""}, {"BestSpeed", Const, 0, ""}, {"DefaultCompression", Const, 0, ""}, @@ -426,6 +438,11 @@ var PackageSymbols = map[string][]Symbol{ {"Writer", Type, 0, ""}, }, "container/heap": { + {"(Interface).Len", Method, 0, ""}, + {"(Interface).Less", Method, 0, ""}, + {"(Interface).Pop", Method, 0, ""}, + {"(Interface).Push", Method, 0, ""}, + {"(Interface).Swap", Method, 0, ""}, {"Fix", Func, 2, "func(h Interface, i int)"}, {"Init", Func, 0, "func(h Interface)"}, {"Interface", Type, 0, ""}, @@ -469,6 +486,10 @@ var PackageSymbols = map[string][]Symbol{ {"Ring.Value", Field, 0, ""}, }, "context": { + {"(Context).Deadline", Method, 7, ""}, + {"(Context).Done", Method, 7, ""}, + {"(Context).Err", Method, 7, ""}, + {"(Context).Value", Method, 7, ""}, {"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"}, {"Background", Func, 7, "func() Context"}, {"CancelCauseFunc", Type, 20, ""}, @@ -488,17 +509,31 @@ var PackageSymbols = map[string][]Symbol{ {"WithoutCancel", Func, 21, "func(parent Context) Context"}, }, "crypto": { + {"(Decapsulator).Decapsulate", Method, 26, ""}, + {"(Decapsulator).Encapsulator", Method, 26, ""}, + {"(Decrypter).Decrypt", Method, 5, ""}, + {"(Decrypter).Public", Method, 5, ""}, + {"(Encapsulator).Bytes", Method, 26, ""}, + {"(Encapsulator).Encapsulate", Method, 26, ""}, {"(Hash).Available", Method, 0, ""}, {"(Hash).HashFunc", Method, 4, ""}, {"(Hash).New", Method, 0, ""}, {"(Hash).Size", Method, 0, ""}, {"(Hash).String", Method, 15, ""}, + {"(MessageSigner).Public", Method, 25, ""}, + {"(MessageSigner).Sign", Method, 25, ""}, + {"(MessageSigner).SignMessage", Method, 25, ""}, + {"(Signer).Public", Method, 4, ""}, + {"(Signer).Sign", Method, 4, ""}, + {"(SignerOpts).HashFunc", Method, 4, ""}, {"BLAKE2b_256", Const, 9, ""}, {"BLAKE2b_384", Const, 9, ""}, {"BLAKE2b_512", Const, 9, ""}, {"BLAKE2s_256", Const, 9, ""}, + {"Decapsulator", Type, 26, ""}, {"Decrypter", Type, 5, ""}, {"DecrypterOpts", Type, 5, ""}, + {"Encapsulator", Type, 26, ""}, {"Hash", Type, 0, ""}, {"MD4", Const, 0, ""}, {"MD5", Const, 0, ""}, @@ -530,6 +565,16 @@ var PackageSymbols = map[string][]Symbol{ {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"}, }, "crypto/cipher": { + {"(AEAD).NonceSize", Method, 2, ""}, + {"(AEAD).Open", Method, 2, ""}, + {"(AEAD).Overhead", Method, 2, ""}, + {"(AEAD).Seal", Method, 2, ""}, + {"(Block).BlockSize", Method, 0, ""}, + {"(Block).Decrypt", Method, 0, ""}, + {"(Block).Encrypt", Method, 0, ""}, + {"(BlockMode).BlockSize", Method, 0, ""}, + {"(BlockMode).CryptBlocks", Method, 0, ""}, + {"(Stream).XORKeyStream", Method, 0, ""}, {"(StreamReader).Read", Method, 0, ""}, {"(StreamWriter).Close", Method, 0, ""}, {"(StreamWriter).Write", Method, 0, ""}, @@ -594,7 +639,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*PublicKey).Bytes", Method, 20, ""}, {"(*PublicKey).Curve", Method, 20, ""}, {"(*PublicKey).Equal", Method, 20, ""}, - {"Curve", Type, 20, ""}, + {"(Curve).GenerateKey", Method, 20, ""}, + {"(Curve).NewPrivateKey", Method, 20, ""}, + {"(Curve).NewPublicKey", Method, 20, ""}, + {"(KeyExchanger).Curve", Method, 26, ""}, + 
{"(KeyExchanger).ECDH", Method, 26, ""}, + {"(KeyExchanger).PublicKey", Method, 26, ""}, + {"KeyExchanger", Type, 26, ""}, {"P256", Func, 20, "func() Curve"}, {"P384", Func, 20, "func() Curve"}, {"P521", Func, 20, "func() Curve"}, @@ -667,6 +718,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*CurveParams).Params", Method, 0, ""}, {"(*CurveParams).ScalarBaseMult", Method, 0, ""}, {"(*CurveParams).ScalarMult", Method, 0, ""}, + {"(Curve).Add", Method, 0, ""}, + {"(Curve).Double", Method, 0, ""}, + {"(Curve).IsOnCurve", Method, 0, ""}, + {"(Curve).Params", Method, 0, ""}, + {"(Curve).ScalarBaseMult", Method, 0, ""}, + {"(Curve).ScalarMult", Method, 0, ""}, {"Curve", Type, 0, ""}, {"CurveParams", Type, 0, ""}, {"CurveParams.B", Field, 0, ""}, @@ -688,6 +745,7 @@ var PackageSymbols = map[string][]Symbol{ }, "crypto/fips140": { {"Enabled", Func, 24, "func() bool"}, + {"Version", Func, 26, "func() string"}, }, "crypto/hkdf": { {"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"}, @@ -708,9 +766,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*DecapsulationKey1024).Bytes", Method, 24, ""}, {"(*DecapsulationKey1024).Decapsulate", Method, 24, ""}, {"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""}, + {"(*DecapsulationKey1024).Encapsulator", Method, 26, ""}, {"(*DecapsulationKey768).Bytes", Method, 24, ""}, {"(*DecapsulationKey768).Decapsulate", Method, 24, ""}, {"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""}, + {"(*DecapsulationKey768).Encapsulator", Method, 26, ""}, {"(*EncapsulationKey1024).Bytes", Method, 24, ""}, {"(*EncapsulationKey1024).Encapsulate", Method, 24, ""}, {"(*EncapsulationKey768).Bytes", Method, 24, ""}, @@ -732,6 +792,10 @@ var PackageSymbols = map[string][]Symbol{ {"SeedSize", Const, 24, ""}, {"SharedKeySize", Const, 24, ""}, }, + "crypto/mlkem/mlkemtest": { + {"Encapsulate1024", Func, 26, "func(ek *mlkem.EncapsulationKey1024, random []byte) (sharedKey []byte, ciphertext []byte, err error)"}, + {"Encapsulate768", Func, 26, "func(ek *mlkem.EncapsulationKey768, random []byte) (sharedKey []byte, ciphertext []byte, err error)"}, + }, "crypto/pbkdf2": { {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"}, }, @@ -769,6 +833,7 @@ var PackageSymbols = map[string][]Symbol{ {"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"}, {"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"}, {"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"}, + {"EncryptOAEPWithOptions", Func, 26, "func(random io.Reader, pub *PublicKey, msg []byte, opts *OAEPOptions) ([]byte, error)"}, {"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"}, {"ErrDecryption", Var, 0, ""}, {"ErrMessageTooLong", Var, 0, ""}, @@ -921,6 +986,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*SessionState).Bytes", Method, 21, ""}, {"(AlertError).Error", Method, 21, ""}, {"(ClientAuthType).String", Method, 15, ""}, + {"(ClientSessionCache).Get", Method, 3, ""}, + {"(ClientSessionCache).Put", Method, 3, ""}, {"(CurveID).String", Method, 15, ""}, {"(QUICEncryptionLevel).String", Method, 21, ""}, {"(RecordHeaderError).Error", Method, 6, ""}, @@ -953,6 +1020,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo.CipherSuites", 
Field, 4, ""}, {"ClientHelloInfo.Conn", Field, 8, ""}, {"ClientHelloInfo.Extensions", Field, 24, ""}, + {"ClientHelloInfo.HelloRetryRequest", Field, 26, ""}, {"ClientHelloInfo.ServerName", Field, 4, ""}, {"ClientHelloInfo.SignatureSchemes", Field, 8, ""}, {"ClientHelloInfo.SupportedCurves", Field, 4, ""}, @@ -1001,6 +1069,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConnectionState.DidResume", Field, 1, ""}, {"ConnectionState.ECHAccepted", Field, 23, ""}, {"ConnectionState.HandshakeComplete", Field, 0, ""}, + {"ConnectionState.HelloRetryRequest", Field, 26, ""}, {"ConnectionState.NegotiatedProtocol", Field, 0, ""}, {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""}, {"ConnectionState.OCSPResponse", Field, 5, ""}, @@ -1055,8 +1124,10 @@ var PackageSymbols = map[string][]Symbol{ {"QUICEncryptionLevelEarly", Const, 21, ""}, {"QUICEncryptionLevelHandshake", Const, 21, ""}, {"QUICEncryptionLevelInitial", Const, 21, ""}, + {"QUICErrorEvent", Const, 26, ""}, {"QUICEvent", Type, 21, ""}, {"QUICEvent.Data", Field, 21, ""}, + {"QUICEvent.Err", Field, 26, ""}, {"QUICEvent.Kind", Field, 21, ""}, {"QUICEvent.Level", Field, 21, ""}, {"QUICEvent.SessionState", Field, 23, ""}, @@ -1151,8 +1222,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*RevocationList).CheckSignatureFrom", Method, 19, ""}, {"(CertificateInvalidError).Error", Method, 0, ""}, {"(ConstraintViolationError).Error", Method, 0, ""}, + {"(ExtKeyUsage).String", Method, 26, ""}, {"(HostnameError).Error", Method, 0, ""}, {"(InsecureAlgorithmError).Error", Method, 6, ""}, + {"(KeyUsage).String", Method, 26, ""}, {"(OID).AppendBinary", Method, 24, ""}, {"(OID).AppendText", Method, 24, ""}, {"(OID).Equal", Method, 22, ""}, @@ -1516,6 +1589,9 @@ var PackageSymbols = map[string][]Symbol{ {"(NullInt64).Value", Method, 0, ""}, {"(NullString).Value", Method, 0, ""}, {"(NullTime).Value", Method, 13, ""}, + {"(Result).LastInsertId", Method, 0, ""}, + {"(Result).RowsAffected", Method, 0, ""}, + {"(Scanner).Scan", Method, 0, ""}, {"ColumnType", Type, 8, ""}, {"Conn", Type, 9, ""}, {"DB", Type, 0, ""}, @@ -1547,8 +1623,6 @@ var PackageSymbols = map[string][]Symbol{ {"NamedArg.Name", Field, 8, ""}, {"NamedArg.Value", Field, 8, ""}, {"Null", Type, 22, ""}, - {"Null.V", Field, 22, ""}, - {"Null.Valid", Field, 22, ""}, {"NullBool", Type, 0, ""}, {"NullBool.Bool", Field, 0, ""}, {"NullBool.Valid", Field, 0, ""}, @@ -1591,10 +1665,72 @@ var PackageSymbols = map[string][]Symbol{ {"TxOptions.ReadOnly", Field, 8, ""}, }, "database/sql/driver": { + {"(ColumnConverter).ColumnConverter", Method, 0, ""}, + {"(Conn).Begin", Method, 0, ""}, + {"(Conn).Close", Method, 0, ""}, + {"(Conn).Prepare", Method, 0, ""}, + {"(ConnBeginTx).BeginTx", Method, 8, ""}, + {"(ConnPrepareContext).PrepareContext", Method, 8, ""}, + {"(Connector).Connect", Method, 10, ""}, + {"(Connector).Driver", Method, 10, ""}, + {"(Driver).Open", Method, 0, ""}, + {"(DriverContext).OpenConnector", Method, 10, ""}, + {"(Execer).Exec", Method, 0, ""}, + {"(ExecerContext).ExecContext", Method, 8, ""}, + {"(NamedValueChecker).CheckNamedValue", Method, 9, ""}, {"(NotNull).ConvertValue", Method, 0, ""}, {"(Null).ConvertValue", Method, 0, ""}, + {"(Pinger).Ping", Method, 8, ""}, + {"(Queryer).Query", Method, 1, ""}, + {"(QueryerContext).QueryContext", Method, 8, ""}, + {"(Result).LastInsertId", Method, 0, ""}, + {"(Result).RowsAffected", Method, 0, ""}, + {"(Rows).Close", Method, 0, ""}, + {"(Rows).Columns", Method, 0, ""}, + {"(Rows).Next", Method, 0, ""}, {"(RowsAffected).LastInsertId", 
Method, 0, ""}, {"(RowsAffected).RowsAffected", Method, 0, ""}, + {"(RowsColumnScanner).Close", Method, 26, ""}, + {"(RowsColumnScanner).Columns", Method, 26, ""}, + {"(RowsColumnScanner).Next", Method, 26, ""}, + {"(RowsColumnScanner).ScanColumn", Method, 26, ""}, + {"(RowsColumnTypeDatabaseTypeName).Close", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).ColumnTypeDatabaseTypeName", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).Columns", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).Next", Method, 8, ""}, + {"(RowsColumnTypeLength).Close", Method, 8, ""}, + {"(RowsColumnTypeLength).ColumnTypeLength", Method, 8, ""}, + {"(RowsColumnTypeLength).Columns", Method, 8, ""}, + {"(RowsColumnTypeLength).Next", Method, 8, ""}, + {"(RowsColumnTypeNullable).Close", Method, 8, ""}, + {"(RowsColumnTypeNullable).ColumnTypeNullable", Method, 8, ""}, + {"(RowsColumnTypeNullable).Columns", Method, 8, ""}, + {"(RowsColumnTypeNullable).Next", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Close", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).ColumnTypePrecisionScale", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Columns", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Next", Method, 8, ""}, + {"(RowsColumnTypeScanType).Close", Method, 8, ""}, + {"(RowsColumnTypeScanType).ColumnTypeScanType", Method, 8, ""}, + {"(RowsColumnTypeScanType).Columns", Method, 8, ""}, + {"(RowsColumnTypeScanType).Next", Method, 8, ""}, + {"(RowsNextResultSet).Close", Method, 8, ""}, + {"(RowsNextResultSet).Columns", Method, 8, ""}, + {"(RowsNextResultSet).HasNextResultSet", Method, 8, ""}, + {"(RowsNextResultSet).Next", Method, 8, ""}, + {"(RowsNextResultSet).NextResultSet", Method, 8, ""}, + {"(SessionResetter).ResetSession", Method, 10, ""}, + {"(Stmt).Close", Method, 0, ""}, + {"(Stmt).Exec", Method, 0, ""}, + {"(Stmt).NumInput", Method, 0, ""}, + {"(Stmt).Query", Method, 0, ""}, + {"(StmtExecContext).ExecContext", Method, 8, ""}, + {"(StmtQueryContext).QueryContext", Method, 8, ""}, + {"(Tx).Commit", Method, 0, ""}, + {"(Tx).Rollback", Method, 0, ""}, + {"(Validator).IsValid", Method, 15, ""}, + {"(ValueConverter).ConvertValue", Method, 0, ""}, + {"(Valuer).Value", Method, 0, ""}, {"Bool", Var, 0, ""}, {"ColumnConverter", Type, 0, ""}, {"Conn", Type, 0, ""}, @@ -1756,6 +1892,9 @@ var PackageSymbols = map[string][]Symbol{ {"(DecodeError).Error", Method, 0, ""}, {"(Tag).GoString", Method, 0, ""}, {"(Tag).String", Method, 0, ""}, + {"(Type).Common", Method, 0, ""}, + {"(Type).Size", Method, 0, ""}, + {"(Type).String", Method, 0, ""}, {"AddrType", Type, 0, ""}, {"AddrType.BasicType", Field, 0, ""}, {"ArrayType", Type, 0, ""}, @@ -3163,6 +3302,7 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_B16", Const, 20, ""}, {"R_LARCH_B21", Const, 20, ""}, {"R_LARCH_B26", Const, 20, ""}, + {"R_LARCH_CALL36", Const, 26, ""}, {"R_LARCH_CFA", Const, 22, ""}, {"R_LARCH_COPY", Const, 19, ""}, {"R_LARCH_DELETE", Const, 22, ""}, @@ -3220,11 +3360,25 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_SUB64", Const, 19, ""}, {"R_LARCH_SUB8", Const, 19, ""}, {"R_LARCH_SUB_ULEB128", Const, 22, ""}, + {"R_LARCH_TLS_DESC32", Const, 26, ""}, + {"R_LARCH_TLS_DESC64", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_HI12", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_LO20", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_PC_HI12", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_PC_LO20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_CALL", Const, 26, ""}, + {"R_LARCH_TLS_DESC_HI20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_LD", Const, 26, ""}, + 
{"R_LARCH_TLS_DESC_LO12", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PCREL20_S2", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PC_HI20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PC_LO12", Const, 26, ""}, {"R_LARCH_TLS_DTPMOD32", Const, 19, ""}, {"R_LARCH_TLS_DTPMOD64", Const, 19, ""}, {"R_LARCH_TLS_DTPREL32", Const, 19, ""}, {"R_LARCH_TLS_DTPREL64", Const, 19, ""}, {"R_LARCH_TLS_GD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_GD_PCREL20_S2", Const, 26, ""}, {"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_IE64_HI12", Const, 20, ""}, {"R_LARCH_TLS_IE64_LO20", Const, 20, ""}, @@ -3235,11 +3389,15 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""}, {"R_LARCH_TLS_LD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LD_PCREL20_S2", Const, 26, ""}, {"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_LE64_HI12", Const, 20, ""}, {"R_LARCH_TLS_LE64_LO20", Const, 20, ""}, + {"R_LARCH_TLS_LE_ADD_R", Const, 26, ""}, {"R_LARCH_TLS_LE_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LE_HI20_R", Const, 26, ""}, {"R_LARCH_TLS_LE_LO12", Const, 20, ""}, + {"R_LARCH_TLS_LE_LO12_R", Const, 26, ""}, {"R_LARCH_TLS_TPREL32", Const, 19, ""}, {"R_LARCH_TLS_TPREL64", Const, 19, ""}, {"R_MIPS", Type, 6, ""}, @@ -3944,6 +4102,7 @@ var PackageSymbols = map[string][]Symbol{ {"(FatArch).ImportedSymbols", Method, 3, ""}, {"(FatArch).Section", Method, 3, ""}, {"(FatArch).Segment", Method, 3, ""}, + {"(Load).Raw", Method, 0, ""}, {"(LoadBytes).Raw", Method, 0, ""}, {"(LoadCmd).GoString", Method, 0, ""}, {"(LoadCmd).String", Method, 0, ""}, @@ -4590,6 +4749,12 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16, ""}, }, "encoding": { + {"(BinaryAppender).AppendBinary", Method, 24, ""}, + {"(BinaryMarshaler).MarshalBinary", Method, 2, ""}, + {"(BinaryUnmarshaler).UnmarshalBinary", Method, 2, ""}, + {"(TextAppender).AppendText", Method, 24, ""}, + {"(TextMarshaler).MarshalText", Method, 2, ""}, + {"(TextUnmarshaler).UnmarshalText", Method, 2, ""}, {"BinaryAppender", Type, 24, ""}, {"BinaryMarshaler", Type, 2, ""}, {"BinaryUnmarshaler", Type, 2, ""}, @@ -4705,6 +4870,17 @@ var PackageSymbols = map[string][]Symbol{ {"URLEncoding", Var, 0, ""}, }, "encoding/binary": { + {"(AppendByteOrder).AppendUint16", Method, 19, ""}, + {"(AppendByteOrder).AppendUint32", Method, 19, ""}, + {"(AppendByteOrder).AppendUint64", Method, 19, ""}, + {"(AppendByteOrder).String", Method, 19, ""}, + {"(ByteOrder).PutUint16", Method, 0, ""}, + {"(ByteOrder).PutUint32", Method, 0, ""}, + {"(ByteOrder).PutUint64", Method, 0, ""}, + {"(ByteOrder).String", Method, 0, ""}, + {"(ByteOrder).Uint16", Method, 0, ""}, + {"(ByteOrder).Uint32", Method, 0, ""}, + {"(ByteOrder).Uint64", Method, 0, ""}, {"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"}, {"AppendByteOrder", Type, 19, ""}, {"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"}, @@ -4767,6 +4943,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Decoder).DecodeValue", Method, 0, ""}, {"(*Encoder).Encode", Method, 0, ""}, {"(*Encoder).EncodeValue", Method, 0, ""}, + {"(GobDecoder).GobDecode", Method, 0, ""}, + {"(GobEncoder).GobEncode", Method, 0, ""}, {"CommonType", Type, 0, ""}, {"CommonType.Id", Field, 0, ""}, {"CommonType.Name", Field, 0, ""}, @@ -4819,10 +4997,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnsupportedTypeError).Error", Method, 0, ""}, {"(*UnsupportedValueError).Error", Method, 0, ""}, {"(Delim).String", Method, 5, ""}, + {"(Marshaler).MarshalJSON", Method, 0, ""}, 
{"(Number).Float64", Method, 1, ""}, {"(Number).Int64", Method, 1, ""}, {"(Number).String", Method, 1, ""}, {"(RawMessage).MarshalJSON", Method, 8, ""}, + {"(Unmarshaler).UnmarshalJSON", Method, 0, ""}, {"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"}, {"Decoder", Type, 0, ""}, {"Delim", Type, 5, ""}, @@ -4894,10 +5074,15 @@ var PackageSymbols = map[string][]Symbol{ {"(CharData).Copy", Method, 0, ""}, {"(Comment).Copy", Method, 0, ""}, {"(Directive).Copy", Method, 0, ""}, + {"(Marshaler).MarshalXML", Method, 2, ""}, + {"(MarshalerAttr).MarshalXMLAttr", Method, 2, ""}, {"(ProcInst).Copy", Method, 0, ""}, {"(StartElement).Copy", Method, 0, ""}, {"(StartElement).End", Method, 2, ""}, + {"(TokenReader).Token", Method, 10, ""}, {"(UnmarshalError).Error", Method, 0, ""}, + {"(Unmarshaler).UnmarshalXML", Method, 2, ""}, + {"(UnmarshalerAttr).UnmarshalXMLAttr", Method, 2, ""}, {"Attr", Type, 0, ""}, {"Attr.Name", Field, 0, ""}, {"Attr.Value", Field, 0, ""}, @@ -4984,6 +5169,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*String).Value", Method, 8, ""}, {"(Func).String", Method, 0, ""}, {"(Func).Value", Method, 8, ""}, + {"(Var).String", Method, 0, ""}, {"Do", Func, 0, "func(f func(KeyValue))"}, {"Float", Type, 0, ""}, {"Func", Type, 0, ""}, @@ -5039,6 +5225,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*FlagSet).Var", Method, 0, ""}, {"(*FlagSet).Visit", Method, 0, ""}, {"(*FlagSet).VisitAll", Method, 0, ""}, + {"(Getter).Get", Method, 2, ""}, + {"(Getter).Set", Method, 2, ""}, + {"(Getter).String", Method, 2, ""}, + {"(Value).Set", Method, 0, ""}, + {"(Value).String", Method, 0, ""}, {"Arg", Func, 0, "func(i int) string"}, {"Args", Func, 0, "func() []string"}, {"Bool", Func, 0, "func(name string, value bool, usage string) *bool"}, @@ -5090,6 +5281,20 @@ var PackageSymbols = map[string][]Symbol{ {"VisitAll", Func, 0, "func(fn func(*Flag))"}, }, "fmt": { + {"(Formatter).Format", Method, 0, ""}, + {"(GoStringer).GoString", Method, 0, ""}, + {"(ScanState).Read", Method, 0, ""}, + {"(ScanState).ReadRune", Method, 0, ""}, + {"(ScanState).SkipSpace", Method, 0, ""}, + {"(ScanState).Token", Method, 0, ""}, + {"(ScanState).UnreadRune", Method, 0, ""}, + {"(ScanState).Width", Method, 0, ""}, + {"(Scanner).Scan", Method, 0, ""}, + {"(State).Flag", Method, 0, ""}, + {"(State).Precision", Method, 0, ""}, + {"(State).Width", Method, 0, ""}, + {"(State).Write", Method, 0, ""}, + {"(Stringer).String", Method, 0, ""}, {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, @@ -5248,7 +5453,18 @@ var PackageSymbols = map[string][]Symbol{ {"(CommentMap).Filter", Method, 1, ""}, {"(CommentMap).String", Method, 1, ""}, {"(CommentMap).Update", Method, 1, ""}, + {"(Decl).End", Method, 0, ""}, + {"(Decl).Pos", Method, 0, ""}, + {"(Expr).End", Method, 0, ""}, + {"(Expr).Pos", Method, 0, ""}, + {"(Node).End", Method, 0, ""}, + {"(Node).Pos", Method, 0, ""}, {"(ObjKind).String", Method, 0, ""}, + {"(Spec).End", Method, 0, ""}, + {"(Spec).Pos", Method, 0, ""}, + {"(Stmt).End", Method, 0, ""}, + {"(Stmt).Pos", Method, 0, ""}, + {"(Visitor).Visit", Method, 0, ""}, {"ArrayType", Type, 0, ""}, {"ArrayType.Elt", Field, 0, ""}, {"ArrayType.Lbrack", Field, 0, ""}, @@ -5271,6 +5487,7 @@ var PackageSymbols = map[string][]Symbol{ {"BasicLit", Type, 0, ""}, {"BasicLit.Kind", Field, 0, ""}, {"BasicLit.Value", Field, 0, ""}, + {"BasicLit.ValueEnd", Field, 26, ""}, 
{"BasicLit.ValuePos", Field, 0, ""}, {"BinaryExpr", Type, 0, ""}, {"BinaryExpr.Op", Field, 0, ""}, @@ -5320,7 +5537,6 @@ var PackageSymbols = map[string][]Symbol{ {"CompositeLit.Rbrace", Field, 0, ""}, {"CompositeLit.Type", Field, 0, ""}, {"Con", Const, 0, ""}, - {"Decl", Type, 0, ""}, {"DeclStmt", Type, 0, ""}, {"DeclStmt.Decl", Field, 0, ""}, {"DeferStmt", Type, 0, ""}, @@ -5341,7 +5557,6 @@ var PackageSymbols = map[string][]Symbol{ {"EmptyStmt", Type, 0, ""}, {"EmptyStmt.Implicit", Field, 5, ""}, {"EmptyStmt.Semicolon", Field, 0, ""}, - {"Expr", Type, 0, ""}, {"ExprStmt", Type, 0, ""}, {"ExprStmt.X", Field, 0, ""}, {"Field", Type, 0, ""}, @@ -5525,11 +5740,9 @@ var PackageSymbols = map[string][]Symbol{ {"SliceExpr.Slice3", Field, 2, ""}, {"SliceExpr.X", Field, 0, ""}, {"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"}, - {"Spec", Type, 0, ""}, {"StarExpr", Type, 0, ""}, {"StarExpr.Star", Field, 0, ""}, {"StarExpr.X", Field, 0, ""}, - {"Stmt", Type, 0, ""}, {"StructType", Type, 0, ""}, {"StructType.Fields", Field, 0, ""}, {"StructType.Incomplete", Field, 0, ""}, @@ -5684,10 +5897,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*SyntaxError).Error", Method, 16, ""}, {"(*TagExpr).Eval", Method, 16, ""}, {"(*TagExpr).String", Method, 16, ""}, + {"(Expr).Eval", Method, 16, ""}, + {"(Expr).String", Method, 16, ""}, {"AndExpr", Type, 16, ""}, {"AndExpr.X", Field, 16, ""}, {"AndExpr.Y", Field, 16, ""}, - {"Expr", Type, 16, ""}, {"GoVersion", Func, 21, "func(x Expr) string"}, {"IsGoBuild", Func, 16, "func(line string) bool"}, {"IsPlusBuild", Func, 16, "func(line string) bool"}, @@ -5706,6 +5920,9 @@ var PackageSymbols = map[string][]Symbol{ }, "go/constant": { {"(Kind).String", Method, 18, ""}, + {"(Value).ExactString", Method, 6, ""}, + {"(Value).Kind", Method, 5, ""}, + {"(Value).String", Method, 5, ""}, {"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"}, {"BitLen", Func, 5, "func(x Value) int"}, {"Bool", Const, 5, ""}, @@ -5744,7 +5961,6 @@ var PackageSymbols = map[string][]Symbol{ {"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"}, {"Unknown", Const, 5, ""}, {"Val", Func, 13, "func(x Value) any"}, - {"Value", Type, 5, ""}, }, "go/doc": { {"(*Package).Filter", Method, 0, ""}, @@ -5828,7 +6044,6 @@ var PackageSymbols = map[string][]Symbol{ {"(*Printer).HTML", Method, 19, ""}, {"(*Printer).Markdown", Method, 19, ""}, {"(*Printer).Text", Method, 19, ""}, - {"Block", Type, 19, ""}, {"Code", Type, 19, ""}, {"Code.Text", Field, 19, ""}, {"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"}, @@ -5873,7 +6088,6 @@ var PackageSymbols = map[string][]Symbol{ {"Printer.TextCodePrefix", Field, 19, ""}, {"Printer.TextPrefix", Field, 19, ""}, {"Printer.TextWidth", Field, 19, ""}, - {"Text", Type, 19, ""}, }, "go/format": { {"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"}, @@ -5945,6 +6159,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).AddLineColumnInfo", Method, 11, ""}, {"(*File).AddLineInfo", Method, 0, ""}, {"(*File).Base", Method, 0, ""}, + {"(*File).End", Method, 26, ""}, {"(*File).Line", Method, 0, ""}, {"(*File).LineCount", Method, 0, ""}, {"(*File).LineStart", Method, 12, ""}, @@ -6307,6 +6522,22 @@ var PackageSymbols = map[string][]Symbol{ {"(Checker).PkgNameOf", Method, 22, ""}, {"(Checker).TypeOf", Method, 5, ""}, {"(Error).Error", Method, 5, ""}, + {"(Importer).Import", Method, 5, ""}, + {"(ImporterFrom).Import", Method, 6, ""}, + {"(ImporterFrom).ImportFrom", Method, 6, 
""}, + {"(Object).Exported", Method, 5, ""}, + {"(Object).Id", Method, 5, ""}, + {"(Object).Name", Method, 5, ""}, + {"(Object).Parent", Method, 5, ""}, + {"(Object).Pkg", Method, 5, ""}, + {"(Object).Pos", Method, 5, ""}, + {"(Object).String", Method, 5, ""}, + {"(Object).Type", Method, 5, ""}, + {"(Sizes).Alignof", Method, 5, ""}, + {"(Sizes).Offsetsof", Method, 5, ""}, + {"(Sizes).Sizeof", Method, 5, ""}, + {"(Type).String", Method, 5, ""}, + {"(Type).Underlying", Method, 5, ""}, {"(TypeAndValue).Addressable", Method, 5, ""}, {"(TypeAndValue).Assignable", Method, 5, ""}, {"(TypeAndValue).HasOk", Method, 5, ""}, @@ -6445,7 +6676,6 @@ var PackageSymbols = map[string][]Symbol{ {"NewUnion", Func, 18, "func(terms []*Term) *Union"}, {"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"}, {"Nil", Type, 5, ""}, - {"Object", Type, 5, ""}, {"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"}, {"Package", Type, 5, ""}, {"PackageVar", Const, 25, ""}, @@ -6516,6 +6746,33 @@ var PackageSymbols = map[string][]Symbol{ {"Lang", Func, 22, "func(x string) string"}, }, "hash": { + {"(Cloner).BlockSize", Method, 25, ""}, + {"(Cloner).Clone", Method, 25, ""}, + {"(Cloner).Reset", Method, 25, ""}, + {"(Cloner).Size", Method, 25, ""}, + {"(Cloner).Sum", Method, 25, ""}, + {"(Cloner).Write", Method, 25, ""}, + {"(Hash).BlockSize", Method, 0, ""}, + {"(Hash).Reset", Method, 0, ""}, + {"(Hash).Size", Method, 0, ""}, + {"(Hash).Sum", Method, 0, ""}, + {"(Hash).Write", Method, 0, ""}, + {"(Hash32).BlockSize", Method, 0, ""}, + {"(Hash32).Reset", Method, 0, ""}, + {"(Hash32).Size", Method, 0, ""}, + {"(Hash32).Sum", Method, 0, ""}, + {"(Hash32).Sum32", Method, 0, ""}, + {"(Hash32).Write", Method, 0, ""}, + {"(Hash64).BlockSize", Method, 0, ""}, + {"(Hash64).Reset", Method, 0, ""}, + {"(Hash64).Size", Method, 0, ""}, + {"(Hash64).Sum", Method, 0, ""}, + {"(Hash64).Sum64", Method, 0, ""}, + {"(Hash64).Write", Method, 0, ""}, + {"(XOF).BlockSize", Method, 25, ""}, + {"(XOF).Read", Method, 25, ""}, + {"(XOF).Reset", Method, 25, ""}, + {"(XOF).Write", Method, 25, ""}, {"Cloner", Type, 25, ""}, {"Hash", Type, 0, ""}, {"Hash32", Type, 0, ""}, @@ -6781,6 +7038,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*YCbCr).SubImage", Method, 0, ""}, {"(*YCbCr).YCbCrAt", Method, 4, ""}, {"(*YCbCr).YOffset", Method, 0, ""}, + {"(Image).At", Method, 0, ""}, + {"(Image).Bounds", Method, 0, ""}, + {"(Image).ColorModel", Method, 0, ""}, + {"(PalettedImage).At", Method, 0, ""}, + {"(PalettedImage).Bounds", Method, 0, ""}, + {"(PalettedImage).ColorIndexAt", Method, 0, ""}, + {"(PalettedImage).ColorModel", Method, 0, ""}, {"(Point).Add", Method, 0, ""}, {"(Point).Div", Method, 0, ""}, {"(Point).Eq", Method, 0, ""}, @@ -6789,6 +7053,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Point).Mul", Method, 0, ""}, {"(Point).String", Method, 0, ""}, {"(Point).Sub", Method, 0, ""}, + {"(RGBA64Image).At", Method, 17, ""}, + {"(RGBA64Image).Bounds", Method, 17, ""}, + {"(RGBA64Image).ColorModel", Method, 17, ""}, + {"(RGBA64Image).RGBA64At", Method, 17, ""}, {"(Rectangle).Add", Method, 0, ""}, {"(Rectangle).At", Method, 5, ""}, {"(Rectangle).Bounds", Method, 5, ""}, @@ -6913,8 +7181,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Alpha).RGBA", Method, 0, ""}, {"(Alpha16).RGBA", Method, 0, ""}, {"(CMYK).RGBA", Method, 5, ""}, + {"(Color).RGBA", Method, 0, ""}, {"(Gray).RGBA", Method, 0, ""}, {"(Gray16).RGBA", Method, 0, ""}, + {"(Model).Convert", Method, 0, ""}, {"(NRGBA).RGBA", Method, 0, ""}, 
{"(NRGBA64).RGBA", Method, 0, ""}, {"(NYCbCrA).RGBA", Method, 6, ""}, @@ -6992,7 +7262,19 @@ var PackageSymbols = map[string][]Symbol{ {"WebSafe", Var, 2, ""}, }, "image/draw": { + {"(Drawer).Draw", Method, 2, ""}, + {"(Image).At", Method, 0, ""}, + {"(Image).Bounds", Method, 0, ""}, + {"(Image).ColorModel", Method, 0, ""}, + {"(Image).Set", Method, 0, ""}, {"(Op).Draw", Method, 2, ""}, + {"(Quantizer).Quantize", Method, 2, ""}, + {"(RGBA64Image).At", Method, 17, ""}, + {"(RGBA64Image).Bounds", Method, 17, ""}, + {"(RGBA64Image).ColorModel", Method, 17, ""}, + {"(RGBA64Image).RGBA64At", Method, 17, ""}, + {"(RGBA64Image).Set", Method, 17, ""}, + {"(RGBA64Image).SetRGBA64", Method, 17, ""}, {"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"}, {"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"}, {"Drawer", Type, 2, ""}, @@ -7027,6 +7309,8 @@ var PackageSymbols = map[string][]Symbol{ }, "image/jpeg": { {"(FormatError).Error", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + {"(Reader).ReadByte", Method, 0, ""}, {"(UnsupportedError).Error", Method, 0, ""}, {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"}, {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"}, @@ -7040,6 +7324,8 @@ var PackageSymbols = map[string][]Symbol{ }, "image/png": { {"(*Encoder).Encode", Method, 4, ""}, + {"(EncoderBufferPool).Get", Method, 9, ""}, + {"(EncoderBufferPool).Put", Method, 9, ""}, {"(FormatError).Error", Method, 0, ""}, {"(UnsupportedError).Error", Method, 0, ""}, {"BestCompression", Const, 4, ""}, @@ -7083,6 +7369,41 @@ var PackageSymbols = map[string][]Symbol{ {"(*SectionReader).ReadAt", Method, 0, ""}, {"(*SectionReader).Seek", Method, 0, ""}, {"(*SectionReader).Size", Method, 0, ""}, + {"(ByteReader).ReadByte", Method, 0, ""}, + {"(ByteScanner).ReadByte", Method, 0, ""}, + {"(ByteScanner).UnreadByte", Method, 0, ""}, + {"(ByteWriter).WriteByte", Method, 1, ""}, + {"(Closer).Close", Method, 0, ""}, + {"(ReadCloser).Close", Method, 0, ""}, + {"(ReadCloser).Read", Method, 0, ""}, + {"(ReadSeekCloser).Close", Method, 16, ""}, + {"(ReadSeekCloser).Read", Method, 16, ""}, + {"(ReadSeekCloser).Seek", Method, 16, ""}, + {"(ReadSeeker).Read", Method, 0, ""}, + {"(ReadSeeker).Seek", Method, 0, ""}, + {"(ReadWriteCloser).Close", Method, 0, ""}, + {"(ReadWriteCloser).Read", Method, 0, ""}, + {"(ReadWriteCloser).Write", Method, 0, ""}, + {"(ReadWriteSeeker).Read", Method, 0, ""}, + {"(ReadWriteSeeker).Seek", Method, 0, ""}, + {"(ReadWriteSeeker).Write", Method, 0, ""}, + {"(ReadWriter).Read", Method, 0, ""}, + {"(ReadWriter).Write", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + {"(ReaderAt).ReadAt", Method, 0, ""}, + {"(ReaderFrom).ReadFrom", Method, 0, ""}, + {"(RuneReader).ReadRune", Method, 0, ""}, + {"(RuneScanner).ReadRune", Method, 0, ""}, + {"(RuneScanner).UnreadRune", Method, 0, ""}, + {"(Seeker).Seek", Method, 0, ""}, + {"(StringWriter).WriteString", Method, 12, ""}, + {"(WriteCloser).Close", Method, 0, ""}, + {"(WriteCloser).Write", Method, 0, ""}, + {"(WriteSeeker).Seek", Method, 0, ""}, + {"(WriteSeeker).Write", Method, 0, ""}, + {"(Writer).Write", Method, 0, ""}, + {"(WriterAt).WriteAt", Method, 0, ""}, + {"(WriterTo).WriteTo", Method, 0, ""}, {"ByteReader", Type, 0, ""}, {"ByteScanner", Type, 0, ""}, {"ByteWriter", Type, 1, ""}, @@ -7142,11 +7463,42 @@ var PackageSymbols = map[string][]Symbol{ {"(*PathError).Error", Method, 16, ""}, 
{"(*PathError).Timeout", Method, 16, ""}, {"(*PathError).Unwrap", Method, 16, ""}, + {"(DirEntry).Info", Method, 16, ""}, + {"(DirEntry).IsDir", Method, 16, ""}, + {"(DirEntry).Name", Method, 16, ""}, + {"(DirEntry).Type", Method, 16, ""}, + {"(FS).Open", Method, 16, ""}, + {"(File).Close", Method, 16, ""}, + {"(File).Read", Method, 16, ""}, + {"(File).Stat", Method, 16, ""}, + {"(FileInfo).IsDir", Method, 16, ""}, + {"(FileInfo).ModTime", Method, 16, ""}, + {"(FileInfo).Mode", Method, 16, ""}, + {"(FileInfo).Name", Method, 16, ""}, + {"(FileInfo).Size", Method, 16, ""}, + {"(FileInfo).Sys", Method, 16, ""}, {"(FileMode).IsDir", Method, 16, ""}, {"(FileMode).IsRegular", Method, 16, ""}, {"(FileMode).Perm", Method, 16, ""}, {"(FileMode).String", Method, 16, ""}, {"(FileMode).Type", Method, 16, ""}, + {"(GlobFS).Glob", Method, 16, ""}, + {"(GlobFS).Open", Method, 16, ""}, + {"(ReadDirFS).Open", Method, 16, ""}, + {"(ReadDirFS).ReadDir", Method, 16, ""}, + {"(ReadDirFile).Close", Method, 16, ""}, + {"(ReadDirFile).Read", Method, 16, ""}, + {"(ReadDirFile).ReadDir", Method, 16, ""}, + {"(ReadDirFile).Stat", Method, 16, ""}, + {"(ReadFileFS).Open", Method, 16, ""}, + {"(ReadFileFS).ReadFile", Method, 16, ""}, + {"(ReadLinkFS).Lstat", Method, 25, ""}, + {"(ReadLinkFS).Open", Method, 25, ""}, + {"(ReadLinkFS).ReadLink", Method, 25, ""}, + {"(StatFS).Open", Method, 16, ""}, + {"(StatFS).Stat", Method, 16, ""}, + {"(SubFS).Open", Method, 16, ""}, + {"(SubFS).Sub", Method, 16, ""}, {"DirEntry", Type, 16, ""}, {"ErrClosed", Var, 16, ""}, {"ErrExist", Var, 16, ""}, @@ -7299,12 +7651,18 @@ var PackageSymbols = map[string][]Symbol{ {"(*TextHandler).WithGroup", Method, 21, ""}, {"(Attr).Equal", Method, 21, ""}, {"(Attr).String", Method, 21, ""}, + {"(Handler).Enabled", Method, 21, ""}, + {"(Handler).Handle", Method, 21, ""}, + {"(Handler).WithAttrs", Method, 21, ""}, + {"(Handler).WithGroup", Method, 21, ""}, {"(Kind).String", Method, 21, ""}, {"(Level).AppendText", Method, 24, ""}, {"(Level).Level", Method, 21, ""}, {"(Level).MarshalJSON", Method, 21, ""}, {"(Level).MarshalText", Method, 21, ""}, {"(Level).String", Method, 21, ""}, + {"(Leveler).Level", Method, 21, ""}, + {"(LogValuer).LogValue", Method, 21, ""}, {"(Record).Attrs", Method, 21, ""}, {"(Record).Clone", Method, 21, ""}, {"(Record).NumAttrs", Method, 21, ""}, @@ -7833,6 +8191,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).Uint32", Method, 0, ""}, {"(*Rand).Uint64", Method, 8, ""}, {"(*Zipf).Uint64", Method, 0, ""}, + {"(Source).Int63", Method, 0, ""}, + {"(Source).Seed", Method, 0, ""}, + {"(Source64).Int63", Method, 8, ""}, + {"(Source64).Seed", Method, 8, ""}, + {"(Source64).Uint64", Method, 8, ""}, {"ExpFloat64", Func, 0, "func() float64"}, {"Float32", Func, 0, "func() float32"}, {"Float64", Func, 0, "func() float64"}, @@ -7888,6 +8251,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).Uint64N", Method, 22, ""}, {"(*Rand).UintN", Method, 22, ""}, {"(*Zipf).Uint64", Method, 22, ""}, + {"(Source).Uint64", Method, 22, ""}, {"ChaCha8", Type, 22, ""}, {"ExpFloat64", Func, 22, "func() float64"}, {"Float32", Func, 22, "func() float32"}, @@ -7951,6 +8315,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).FormDataContentType", Method, 0, ""}, {"(*Writer).SetBoundary", Method, 1, ""}, {"(*Writer).WriteField", Method, 0, ""}, + {"(File).Close", Method, 0, ""}, + {"(File).Read", Method, 0, ""}, + {"(File).ReadAt", Method, 0, ""}, + {"(File).Seek", Method, 0, ""}, {"ErrMessageTooLarge", Var, 9, ""}, {"File", Type, 0, ""}, 
{"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"}, @@ -8135,6 +8503,19 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SetDeadline", Method, 0, ""}, {"(*UnixListener).SetUnlinkOnClose", Method, 8, ""}, {"(*UnixListener).SyscallConn", Method, 10, ""}, + {"(Addr).Network", Method, 0, ""}, + {"(Addr).String", Method, 0, ""}, + {"(Conn).Close", Method, 0, ""}, + {"(Conn).LocalAddr", Method, 0, ""}, + {"(Conn).Read", Method, 0, ""}, + {"(Conn).RemoteAddr", Method, 0, ""}, + {"(Conn).SetDeadline", Method, 0, ""}, + {"(Conn).SetReadDeadline", Method, 0, ""}, + {"(Conn).SetWriteDeadline", Method, 0, ""}, + {"(Conn).Write", Method, 0, ""}, + {"(Error).Error", Method, 0, ""}, + {"(Error).Temporary", Method, 0, ""}, + {"(Error).Timeout", Method, 0, ""}, {"(Flags).String", Method, 0, ""}, {"(HardwareAddr).String", Method, 0, ""}, {"(IP).AppendText", Method, 24, ""}, @@ -8158,6 +8539,16 @@ var PackageSymbols = map[string][]Symbol{ {"(InvalidAddrError).Error", Method, 0, ""}, {"(InvalidAddrError).Temporary", Method, 0, ""}, {"(InvalidAddrError).Timeout", Method, 0, ""}, + {"(Listener).Accept", Method, 0, ""}, + {"(Listener).Addr", Method, 0, ""}, + {"(Listener).Close", Method, 0, ""}, + {"(PacketConn).Close", Method, 0, ""}, + {"(PacketConn).LocalAddr", Method, 0, ""}, + {"(PacketConn).ReadFrom", Method, 0, ""}, + {"(PacketConn).SetDeadline", Method, 0, ""}, + {"(PacketConn).SetReadDeadline", Method, 0, ""}, + {"(PacketConn).SetWriteDeadline", Method, 0, ""}, + {"(PacketConn).WriteTo", Method, 0, ""}, {"(UnknownNetworkError).Error", Method, 0, ""}, {"(UnknownNetworkError).Temporary", Method, 0, ""}, {"(UnknownNetworkError).Timeout", Method, 0, ""}, @@ -8333,6 +8724,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Client).Head", Method, 0, ""}, {"(*Client).Post", Method, 0, ""}, {"(*Client).PostForm", Method, 0, ""}, + {"(*ClientConn).Available", Method, 26, ""}, + {"(*ClientConn).Close", Method, 26, ""}, + {"(*ClientConn).Err", Method, 26, ""}, + {"(*ClientConn).InFlight", Method, 26, ""}, + {"(*ClientConn).Release", Method, 26, ""}, + {"(*ClientConn).Reserve", Method, 26, ""}, + {"(*ClientConn).RoundTrip", Method, 26, ""}, + {"(*ClientConn).SetStateHook", Method, 26, ""}, {"(*Cookie).String", Method, 0, ""}, {"(*Cookie).Valid", Method, 18, ""}, {"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""}, @@ -8392,10 +8791,22 @@ var PackageSymbols = map[string][]Symbol{ {"(*Transport).CancelRequest", Method, 1, ""}, {"(*Transport).Clone", Method, 13, ""}, {"(*Transport).CloseIdleConnections", Method, 0, ""}, + {"(*Transport).NewClientConn", Method, 26, ""}, {"(*Transport).RegisterProtocol", Method, 0, ""}, {"(*Transport).RoundTrip", Method, 0, ""}, + {"(CloseNotifier).CloseNotify", Method, 1, ""}, {"(ConnState).String", Method, 3, ""}, + {"(CookieJar).Cookies", Method, 0, ""}, + {"(CookieJar).SetCookies", Method, 0, ""}, {"(Dir).Open", Method, 0, ""}, + {"(File).Close", Method, 0, ""}, + {"(File).Read", Method, 0, ""}, + {"(File).Readdir", Method, 0, ""}, + {"(File).Seek", Method, 0, ""}, + {"(File).Stat", Method, 0, ""}, + {"(FileSystem).Open", Method, 0, ""}, + {"(Flusher).Flush", Method, 0, ""}, + {"(Handler).ServeHTTP", Method, 0, ""}, {"(HandlerFunc).ServeHTTP", Method, 0, ""}, {"(Header).Add", Method, 0, ""}, {"(Header).Clone", Method, 13, ""}, @@ -8405,10 +8816,16 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14, ""}, {"(Header).Write", Method, 0, ""}, {"(Header).WriteSubset", Method, 0, ""}, + 
{"(Hijacker).Hijack", Method, 0, ""}, {"(Protocols).HTTP1", Method, 24, ""}, {"(Protocols).HTTP2", Method, 24, ""}, {"(Protocols).String", Method, 24, ""}, {"(Protocols).UnencryptedHTTP2", Method, 24, ""}, + {"(Pusher).Push", Method, 8, ""}, + {"(ResponseWriter).Header", Method, 0, ""}, + {"(ResponseWriter).Write", Method, 0, ""}, + {"(ResponseWriter).WriteHeader", Method, 0, ""}, + {"(RoundTripper).RoundTrip", Method, 0, ""}, {"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"}, {"CanonicalHeaderKey", Func, 0, "func(s string) string"}, {"Client", Type, 0, ""}, @@ -8416,6 +8833,7 @@ var PackageSymbols = map[string][]Symbol{ {"Client.Jar", Field, 0, ""}, {"Client.Timeout", Field, 3, ""}, {"Client.Transport", Field, 0, ""}, + {"ClientConn", Type, 26, ""}, {"CloseNotifier", Type, 1, ""}, {"ConnState", Type, 3, ""}, {"Cookie", Type, 0, ""}, @@ -8726,6 +9144,8 @@ var PackageSymbols = map[string][]Symbol{ "net/http/cookiejar": { {"(*Jar).Cookies", Method, 1, ""}, {"(*Jar).SetCookies", Method, 1, ""}, + {"(PublicSuffixList).PublicSuffix", Method, 1, ""}, + {"(PublicSuffixList).String", Method, 1, ""}, {"Jar", Type, 1, ""}, {"New", Func, 1, "func(o *Options) (*Jar, error)"}, {"Options", Type, 1, ""}, @@ -8819,6 +9239,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*ServerConn).Pending", Method, 0, ""}, {"(*ServerConn).Read", Method, 0, ""}, {"(*ServerConn).Write", Method, 0, ""}, + {"(BufferPool).Get", Method, 6, ""}, + {"(BufferPool).Put", Method, 6, ""}, {"BufferPool", Type, 6, ""}, {"ClientConn", Type, 0, ""}, {"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"}, @@ -8972,6 +9394,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Server).ServeConn", Method, 0, ""}, {"(*Server).ServeHTTP", Method, 0, ""}, {"(*Server).ServeRequest", Method, 0, ""}, + {"(ClientCodec).Close", Method, 0, ""}, + {"(ClientCodec).ReadResponseBody", Method, 0, ""}, + {"(ClientCodec).ReadResponseHeader", Method, 0, ""}, + {"(ClientCodec).WriteRequest", Method, 0, ""}, + {"(ServerCodec).Close", Method, 0, ""}, + {"(ServerCodec).ReadRequestBody", Method, 0, ""}, + {"(ServerCodec).ReadRequestHeader", Method, 0, ""}, + {"(ServerCodec).WriteResponse", Method, 0, ""}, {"(ServerError).Error", Method, 0, ""}, {"Accept", Func, 0, "func(lis net.Listener)"}, {"Call", Type, 0, ""}, @@ -9030,6 +9460,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Client).StartTLS", Method, 0, ""}, {"(*Client).TLSConnectionState", Method, 5, ""}, {"(*Client).Verify", Method, 0, ""}, + {"(Auth).Next", Method, 0, ""}, + {"(Auth).Start", Method, 0, ""}, {"Auth", Type, 0, ""}, {"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"}, {"Client", Type, 0, ""}, @@ -9241,10 +9673,18 @@ var PackageSymbols = map[string][]Symbol{ {"(*SyscallError).Error", Method, 0, ""}, {"(*SyscallError).Timeout", Method, 10, ""}, {"(*SyscallError).Unwrap", Method, 13, ""}, + {"(FileInfo).IsDir", Method, 0, ""}, + {"(FileInfo).ModTime", Method, 0, ""}, + {"(FileInfo).Mode", Method, 0, ""}, + {"(FileInfo).Name", Method, 0, ""}, + {"(FileInfo).Size", Method, 0, ""}, + {"(FileInfo).Sys", Method, 0, ""}, {"(FileMode).IsDir", Method, 0, ""}, {"(FileMode).IsRegular", Method, 1, ""}, {"(FileMode).Perm", Method, 0, ""}, {"(FileMode).String", Method, 0, ""}, + {"(Signal).Signal", Method, 0, ""}, + {"(Signal).String", Method, 0, ""}, {"Args", Var, 0, ""}, {"Chdir", Func, 0, "func(dir string) error"}, {"Chmod", Func, 0, "func(name string, mode FileMode) error"}, @@ -9521,6 +9961,45 @@ var PackageSymbols = map[string][]Symbol{ 
{"(StructField).IsExported", Method, 17, ""}, {"(StructTag).Get", Method, 0, ""}, {"(StructTag).Lookup", Method, 7, ""}, + {"(Type).Align", Method, 0, ""}, + {"(Type).AssignableTo", Method, 0, ""}, + {"(Type).Bits", Method, 0, ""}, + {"(Type).CanSeq", Method, 23, ""}, + {"(Type).CanSeq2", Method, 23, ""}, + {"(Type).ChanDir", Method, 0, ""}, + {"(Type).Comparable", Method, 4, ""}, + {"(Type).ConvertibleTo", Method, 1, ""}, + {"(Type).Elem", Method, 0, ""}, + {"(Type).Field", Method, 0, ""}, + {"(Type).FieldAlign", Method, 0, ""}, + {"(Type).FieldByIndex", Method, 0, ""}, + {"(Type).FieldByName", Method, 0, ""}, + {"(Type).FieldByNameFunc", Method, 0, ""}, + {"(Type).Fields", Method, 26, ""}, + {"(Type).Implements", Method, 0, ""}, + {"(Type).In", Method, 0, ""}, + {"(Type).Ins", Method, 26, ""}, + {"(Type).IsVariadic", Method, 0, ""}, + {"(Type).Key", Method, 0, ""}, + {"(Type).Kind", Method, 0, ""}, + {"(Type).Len", Method, 0, ""}, + {"(Type).Method", Method, 0, ""}, + {"(Type).MethodByName", Method, 0, ""}, + {"(Type).Methods", Method, 26, ""}, + {"(Type).Name", Method, 0, ""}, + {"(Type).NumField", Method, 0, ""}, + {"(Type).NumIn", Method, 0, ""}, + {"(Type).NumMethod", Method, 0, ""}, + {"(Type).NumOut", Method, 0, ""}, + {"(Type).Out", Method, 0, ""}, + {"(Type).Outs", Method, 26, ""}, + {"(Type).OverflowComplex", Method, 23, ""}, + {"(Type).OverflowFloat", Method, 23, ""}, + {"(Type).OverflowInt", Method, 23, ""}, + {"(Type).OverflowUint", Method, 23, ""}, + {"(Type).PkgPath", Method, 0, ""}, + {"(Type).Size", Method, 0, ""}, + {"(Type).String", Method, 0, ""}, {"(Value).Addr", Method, 0, ""}, {"(Value).Bool", Method, 0, ""}, {"(Value).Bytes", Method, 0, ""}, @@ -9547,6 +10026,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).FieldByIndexErr", Method, 18, ""}, {"(Value).FieldByName", Method, 0, ""}, {"(Value).FieldByNameFunc", Method, 0, ""}, + {"(Value).Fields", Method, 26, ""}, {"(Value).Float", Method, 0, ""}, {"(Value).Grow", Method, 20, ""}, {"(Value).Index", Method, 0, ""}, @@ -9563,6 +10043,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).MapRange", Method, 12, ""}, {"(Value).Method", Method, 0, ""}, {"(Value).MethodByName", Method, 0, ""}, + {"(Value).Methods", Method, 26, ""}, {"(Value).NumField", Method, 0, ""}, {"(Value).NumMethod", Method, 0, ""}, {"(Value).OverflowComplex", Method, 0, ""}, @@ -9678,7 +10159,6 @@ var PackageSymbols = map[string][]Symbol{ {"StructOf", Func, 7, "func(fields []StructField) Type"}, {"StructTag", Type, 0, ""}, {"Swapper", Func, 8, "func(slice any) func(i int, j int)"}, - {"Type", Type, 0, ""}, {"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"}, {"TypeFor", Func, 22, "func[T any]() Type"}, {"TypeOf", Func, 0, "func(i any) Type"}, @@ -9880,6 +10360,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeAssertionError).Error", Method, 0, ""}, {"(*TypeAssertionError).RuntimeError", Method, 0, ""}, {"(Cleanup).Stop", Method, 24, ""}, + {"(Error).Error", Method, 0, ""}, + {"(Error).RuntimeError", Method, 0, ""}, {"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"}, {"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"}, {"BlockProfileRecord", Type, 1, ""}, @@ -10154,6 +10636,9 @@ var PackageSymbols = map[string][]Symbol{ {"(IntSlice).Search", Method, 0, ""}, {"(IntSlice).Sort", Method, 0, ""}, {"(IntSlice).Swap", Method, 0, ""}, + {"(Interface).Len", Method, 0, ""}, + {"(Interface).Less", Method, 0, ""}, + {"(Interface).Swap", Method, 0, ""}, {"(StringSlice).Len", Method, 0, 
""}, {"(StringSlice).Less", Method, 0, ""}, {"(StringSlice).Search", Method, 0, ""}, @@ -10345,6 +10830,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*WaitGroup).Done", Method, 0, ""}, {"(*WaitGroup).Go", Method, 25, ""}, {"(*WaitGroup).Wait", Method, 0, ""}, + {"(Locker).Lock", Method, 0, ""}, + {"(Locker).Unlock", Method, 0, ""}, {"Cond", Type, 0, ""}, {"Cond.L", Field, 0, ""}, {"Locker", Type, 0, ""}, @@ -10486,10 +10973,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Timeval).Nano", Method, 0, ""}, {"(*Timeval).Nanoseconds", Method, 0, ""}, {"(*Timeval).Unix", Method, 0, ""}, + {"(Conn).SyscallConn", Method, 9, ""}, {"(Errno).Error", Method, 0, ""}, {"(Errno).Is", Method, 13, ""}, {"(Errno).Temporary", Method, 0, ""}, {"(Errno).Timeout", Method, 0, ""}, + {"(RawConn).Control", Method, 9, ""}, + {"(RawConn).Read", Method, 9, ""}, + {"(RawConn).Write", Method, 9, ""}, {"(Signal).Signal", Method, 0, ""}, {"(Signal).String", Method, 0, ""}, {"(Token).Close", Method, 0, ""}, @@ -14409,7 +14900,7 @@ var PackageSymbols = map[string][]Symbol{ {"RouteMessage.Data", Field, 0, ""}, {"RouteMessage.Header", Field, 0, ""}, {"RouteRIB", Func, 0, ""}, - {"RoutingMessage", Type, 0, ""}, + {"RoutingMessage", Type, 14, ""}, {"RtAttr", Type, 0, ""}, {"RtAttr.Len", Field, 0, ""}, {"RtAttr.Type", Field, 0, ""}, @@ -15895,7 +16386,6 @@ var PackageSymbols = map[string][]Symbol{ {"SockFprog.Filter", Field, 0, ""}, {"SockFprog.Len", Field, 0, ""}, {"SockFprog.Pad_cgo_0", Field, 0, ""}, - {"Sockaddr", Type, 0, ""}, {"SockaddrDatalink", Type, 0, ""}, {"SockaddrDatalink.Alen", Field, 0, ""}, {"SockaddrDatalink.Data", Field, 0, ""}, @@ -16801,6 +17291,29 @@ var PackageSymbols = map[string][]Symbol{ {"(BenchmarkResult).MemString", Method, 1, ""}, {"(BenchmarkResult).NsPerOp", Method, 0, ""}, {"(BenchmarkResult).String", Method, 0, ""}, + {"(TB).ArtifactDir", Method, 26, ""}, + {"(TB).Attr", Method, 25, ""}, + {"(TB).Chdir", Method, 24, ""}, + {"(TB).Cleanup", Method, 14, ""}, + {"(TB).Context", Method, 24, ""}, + {"(TB).Error", Method, 2, ""}, + {"(TB).Errorf", Method, 2, ""}, + {"(TB).Fail", Method, 2, ""}, + {"(TB).FailNow", Method, 2, ""}, + {"(TB).Failed", Method, 2, ""}, + {"(TB).Fatal", Method, 2, ""}, + {"(TB).Fatalf", Method, 2, ""}, + {"(TB).Helper", Method, 9, ""}, + {"(TB).Log", Method, 2, ""}, + {"(TB).Logf", Method, 2, ""}, + {"(TB).Name", Method, 8, ""}, + {"(TB).Output", Method, 25, ""}, + {"(TB).Setenv", Method, 17, ""}, + {"(TB).Skip", Method, 2, ""}, + {"(TB).SkipNow", Method, 2, ""}, + {"(TB).Skipf", Method, 2, ""}, + {"(TB).Skipped", Method, 2, ""}, + {"(TB).TempDir", Method, 15, ""}, {"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"}, {"B", Type, 0, ""}, {"B.N", Field, 0, ""}, @@ -16851,7 +17364,6 @@ var PackageSymbols = map[string][]Symbol{ {"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"}, {"Short", Func, 0, "func() bool"}, {"T", Type, 0, ""}, - {"TB", Type, 2, ""}, {"Testing", Func, 21, "func() bool"}, {"Verbose", Func, 1, "func() bool"}, }, @@ -16887,6 +17399,7 @@ var PackageSymbols = map[string][]Symbol{ "testing/quick": { {"(*CheckEqualError).Error", Method, 0, ""}, {"(*CheckError).Error", Method, 0, ""}, + {"(Generator).Generate", Method, 0, ""}, {"(SetupError).Error", Method, 0, ""}, {"Check", Func, 0, "func(f any, config *Config) error"}, {"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"}, @@ -17093,6 +17606,10 @@ var PackageSymbols = map[string][]Symbol{ {"(ListNode).Position", 
Method, 1, ""}, {"(ListNode).Type", Method, 0, ""}, {"(NilNode).Position", Method, 1, ""}, + {"(Node).Copy", Method, 0, ""}, + {"(Node).Position", Method, 1, ""}, + {"(Node).String", Method, 0, ""}, + {"(Node).Type", Method, 0, ""}, {"(NodeType).Type", Method, 0, ""}, {"(NumberNode).Position", Method, 1, ""}, {"(NumberNode).Type", Method, 0, ""}, diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go index e223e0f34..59a5de36a 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -39,7 +39,7 @@ const ( Var // "EOF" Const // "Pi" Field // "Point.X" - Method // "(*Buffer).Grow" + Method // "(*Buffer).Grow" or "(Reader).Read" ) func (kind Kind) String() string { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go index 3db2a135b..7ebe9768b 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go @@ -8,7 +8,7 @@ import ( "fmt" "go/ast" "go/types" - _ "unsafe" + _ "unsafe" // for go:linkname hack ) // CallKind describes the function position of an [*ast.CallExpr]. diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index fef74a785..51001666e 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -23,7 +23,6 @@ import ( "go/token" "go/types" "reflect" - "unsafe" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/aliases" @@ -40,8 +39,7 @@ func SetUsesCgo(conf *types.Config) bool { } } - addr := unsafe.Pointer(f.UnsafeAddr()) - *(*bool)(addr) = true + *(*bool)(f.Addr().UnsafePointer()) = true return true } diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go index a5f4e3252..cdd36c388 100644 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -9,6 +9,7 @@ package versions // named constants, to avoid misspelling const ( + Go1_17 = "go1.17" Go1_18 = "go1.18" Go1_19 = "go1.19" Go1_20 = "go1.20" diff --git a/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go new file mode 100644 index 000000000..40ec27d87 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go @@ -0,0 +1,303 @@ +/* +Copyright 2013 Google Inc. All Rights Reserved. +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package verbosity + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" +) + +// New returns a struct that implements -v and -vmodule support. 
Changing and +// checking these settings is thread-safe, with all concurrency issues handled +// internally. +func New() *VState { + vs := new(VState) + + // The two fields must have a pointer to the overall struct for their + // implementation of Set. + vs.vmodule.vs = vs + vs.verbosity.vs = vs + + return vs +} + +// Value is an extension that makes it possible to use the values in pflag. +type Value interface { + flag.Value + Type() string +} + +func (vs *VState) V() Value { + return &vs.verbosity +} + +func (vs *VState) VModule() Value { + return &vs.vmodule +} + +// VState contains settings and state. Some of its fields can be accessed +// through atomic read/writes, in other cases a mutex must be held. +type VState struct { + mu sync.Mutex + + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity levelSpec // V logging level, the value of the -v flag. + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using atomic.LoadInt32, but is only modified under mu. + filterLength int32 +} + +// Level must be an int32 to support atomic read/writes. +type Level int32 + +type levelSpec struct { + vs *VState + l Level +} + +// get returns the value of the level. +func (l *levelSpec) get() Level { + return Level(atomic.LoadInt32((*int32)(&l.l))) +} + +// set sets the value of the level. +func (l *levelSpec) set(val Level) { + atomic.StoreInt32((*int32)(&l.l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *levelSpec) String() string { + return strconv.FormatInt(int64(l.l), 10) +} + +// Get is part of the flag.Getter interface. It returns the +// verbosity level as int32. +func (l *levelSpec) Get() interface{} { + return int32(l.l) +} + +// Type is part of pflag.Value. +func (l *levelSpec) Type() string { + return "Level" +} + +// Set is part of the flag.Value interface. +func (l *levelSpec) Set(value string) error { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + l.vs.mu.Lock() + defer l.vs.mu.Unlock() + l.vs.set(Level(v), l.vs.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + vs *VState + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +func match reports whether the file matches the pattern. It uses a string comparison if the pattern contains no metacharacters. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + // Empty instances don't have and don't need a lock (can + // happen when flag uses introspection).
+ if m.vs != nil { + m.vs.mu.Lock() + defer m.vs.mu.Unlock() + } + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +// Type is part of pflag.Value. +func (m *moduleSpec) Type() string { + return "pattern=N,..." +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Set sets the module value. +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.ParseInt(patLev[1], 10, 32) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + m.vs.mu.Lock() + defer m.vs.mu.Unlock() + m.vs.set(m.vs.verbosity.l, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// set sets a consistent state for V logging. +// The mutex must be held. +func (vs *VState) set(l Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + vs.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&vs.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + vs.vmodule.filter = filter + vs.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&vs.filterLength, int32(len(filter))) + vs.verbosity.set(l) +} + +// Enabled checks whether logging is enabled at the given level. This must be +// called with depth=0 when the caller of Enabled will do the logging and +// higher values when more stack levels need to be skipped. +// +// The mutex will be locked only if needed. +func (vs *VState) Enabled(level Level, depth int) bool { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if vs.verbosity.get() >= level { + return true + } + + // It's off globally but vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&vs.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway.
+ vs.mu.Lock() + defer vs.mu.Unlock() + if runtime.Callers(depth+2, vs.pcs[:]) == 0 { + return false + } + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := vs.pcs[0] - 1 + v, ok := vs.vmap[pc] + if !ok { + v = vs.setV(pc) + } + return v >= level + } + return false +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// Mutex is held. +func (vs *VState) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + file = strings.TrimSuffix(file, ".go") + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range vs.vmodule.filter { + if filter.match(file) { + vs.vmap[pc] = filter.level + return filter.level + } + } + vs.vmap[pc] = 0 + return 0 +} diff --git a/vendor/k8s.io/klog/v2/textlogger/options.go b/vendor/k8s.io/klog/v2/textlogger/options.go new file mode 100644 index 000000000..b1c4eefb3 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/options.go @@ -0,0 +1,154 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textlogger + +import ( + "flag" + "io" + "os" + "strconv" + "time" + + "k8s.io/klog/v2/internal/verbosity" +) + +// Config influences logging in a text logger. To make this configurable via +// command line flags, instantiate this once per program and use AddFlags to +// bind command line flags to the instance before passing it to NewLogger. +// +// Must be constructed with NewConfig. +type Config struct { + vstate *verbosity.VState + co configOptions +} + +// Verbosity returns a value instance that can be used to query (via String) or +// modify (via Set) the verbosity threshold. This is thread-safe and can be +// done at runtime. +func (c *Config) Verbosity() flag.Value { + return c.vstate.V() +} + +// VModule returns a value instance that can be used to query (via String) or +// modify (via Set) the vmodule settings. This is thread-safe and can be done +// at runtime. +func (c *Config) VModule() flag.Value { + return c.vstate.VModule() +} + +// ConfigOption implements functional parameters for NewConfig. +type ConfigOption func(co *configOptions) + +type configOptions struct { + verbosityFlagName string + vmoduleFlagName string + verbosityDefault int + fixedTime *time.Time + unwind func(int) (string, int) + output io.Writer +} + +// VerbosityFlagName overrides the default -v for the verbosity level. +func VerbosityFlagName(name string) ConfigOption { + return func(co *configOptions) { + co.verbosityFlagName = name + } +} + +// VModuleFlagName overrides the default -vmodule for the per-module +// verbosity levels.
+func VModuleFlagName(name string) ConfigOption { + return func(co *configOptions) { + co.vmoduleFlagName = name + } +} + +// Verbosity overrides the default verbosity level of 0. +// See https://github.com/kubernetes/community/blob/9406b4352fe2d5810cb21cc3cb059ce5886de157/contributors/devel/sig-instrumentation/logging.md#logging-conventions +// for log level conventions in Kubernetes. +func Verbosity(level int) ConfigOption { + return func(co *configOptions) { + co.verbosityDefault = level + } +} + +// Output overrides stderr as the output stream. +func Output(output io.Writer) ConfigOption { + return func(co *configOptions) { + co.output = output + } +} + +// FixedTime overrides the actual time with a fixed time. Useful only for testing. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func FixedTime(ts time.Time) ConfigOption { + return func(co *configOptions) { + co.fixedTime = &ts + } +} + +// Backtrace overrides the default mechanism for determining the call site. +// The callback is invoked with the number of function calls between itself +// and the call site. It must return the file name and line number. An empty +// file name indicates that the information is unknown. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func Backtrace(unwind func(skip int) (filename string, line int)) ConfigOption { + return func(co *configOptions) { + co.unwind = unwind + } +} + +// NewConfig returns a configuration with recommended defaults and optional +// modifications. Command line flags are not bound to any FlagSet yet. +func NewConfig(opts ...ConfigOption) *Config { + c := &Config{ + vstate: verbosity.New(), + co: configOptions{ + verbosityFlagName: "v", + vmoduleFlagName: "vmodule", + verbosityDefault: 0, + unwind: runtimeBacktrace, + output: os.Stderr, + }, + } + for _, opt := range opts { + opt(&c.co) + } + + // Cannot fail for this input. + _ = c.Verbosity().Set(strconv.FormatInt(int64(c.co.verbosityDefault), 10)) + return c +} + +// AddFlags registers the command line flags that control the configuration. +// +// The default flag names are the same as in klog, so unless those defaults +// are changed, either klog.InitFlags or Config.AddFlags can be used for the +// same flag set, but not both. +func (c *Config) AddFlags(fs *flag.FlagSet) { + fs.Var(c.Verbosity(), c.co.verbosityFlagName, "number for the log level verbosity of the testing logger") + fs.Var(c.VModule(), c.co.vmoduleFlagName, "comma-separated list of pattern=N log level settings for files matching the patterns") +} diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger.go b/vendor/k8s.io/klog/v2/textlogger/textlogger.go new file mode 100644 index 000000000..0b55a2994 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/textlogger.go @@ -0,0 +1,187 @@ +/* +Copyright 2019 The Kubernetes Authors. +Copyright 2020 Intel Corporation. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package textlogger contains an implementation of the logr interface which +// produces the exact same output as klog. It does not route output through +// klog (i.e. ignores [k8s.io/klog/v2.InitFlags]). Instead, all settings must be +// configured through its own [NewConfig] and [Config.AddFlags]. +package textlogger + +import ( + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/verbosity" +) + +var ( + // TimeNow is used to retrieve the current time. May be changed for testing. + TimeNow = time.Now +) + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + +// NewLogger constructs a new logger. +// +// Verbosity can be modified at any time through the Config.Verbosity and +// Config.VModule API. +func NewLogger(c *Config) logr.Logger { + return logr.New(&tlogger{ + values: nil, + config: c, + }) +} + +type tlogger struct { + callDepth int + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string + config *Config +} + +func (l *tlogger) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *tlogger) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *tlogger) Enabled(level int) bool { + return l.config.vstate.Enabled(verbosity.Level(level), 1+l.callDepth) +} + +func (l *tlogger) Info(_ int, msg string, kvList ...interface{}) { + l.print(nil, severity.InfoLog, msg, kvList) +} + +func (l *tlogger) Error(err error, msg string, kvList ...interface{}) { + l.print(err, severity.ErrorLog, msg, kvList) +} + +func (l *tlogger) print(err error, s severity.Severity, msg string, kvList []interface{}) { + // Determine caller. + // +1 for this frame, +1 for Info/Error. + skip := l.callDepth + 2 + file, line := l.config.co.unwind(skip) + if file == "" { + file = "???" + line = 1 + } else if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + l.printWithInfos(file, line, time.Now(), err, s, msg, kvList) +} + +func runtimeBacktrace(skip int) (string, int) { + _, file, line, ok := runtime.Caller(skip + 1) + if !ok { + return "", 0 + } + return file, line +} + +func (l *tlogger) printWithInfos(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + defer buffer.PutBuffer(b) + + // Format header. + if l.config.co.fixedTime != nil { + now = *l.config.co.fixedTime + } + b.FormatHeader(s, file, line, now) + + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVFormat(&b.Buffer, "err", err) + } + serialize.MergeAndFormatKVs(&b.Buffer, l.values, kvList) + if b.Len() == 0 || b.Bytes()[b.Len()-1] != '\n' { + b.WriteByte('\n') + } + _, _ = l.config.co.output.Write(b.Bytes()) +} + +func (l *tlogger) WriteKlogBuffer(data []byte) { + _, _ = l.config.co.output.Write(data) +} + +// WithName returns a new logr.Logger with the specified name appended.
klogr +// uses '/' characters to separate name elements. Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l *tlogger) WithName(name string) logr.LogSink { + clone := *l + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + clone.values = v + } else { + // Prepend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + clone.values = v + clone.hasPrefix = true + } + return &clone +} + +func (l *tlogger) WithValues(kvList ...interface{}) logr.LogSink { + clone := *l + clone.values = serialize.WithValues(l.values, kvList) + return &clone +} + +// KlogBufferWriter is implemented by the textlogger LogSink. +type KlogBufferWriter interface { + // WriteKlogBuffer takes a pre-formatted buffer prepared by klog and + // writes it unchanged to the output stream. Can be used with + // klog.WriteKlogBuffer when setting a logger through + // klog.SetLoggerWithOptions. + WriteKlogBuffer([]byte) +} + +var _ logr.LogSink = &tlogger{} +var _ logr.CallDepthLogSink = &tlogger{} +var _ KlogBufferWriter = &tlogger{} diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go new file mode 100644 index 000000000..c888ef8a6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go @@ -0,0 +1,52 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textlogger + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *tlogger) Handle(ctx context.Context, record slog.Record) error { + return sloghandler.Handle(ctx, record, l.groups, l.printWithInfos) +} + +func (l *tlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *tlogger) WithGroup(name string) logr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ logr.SlogSink = &tlogger{} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go index 6eee935b2..830ec3ca0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -92,10 +92,21 @@ type OpenAPICanonicalTypeNamer interface { OpenAPICanonicalTypeName() string } +// OpenAPIModelNamer is an interface Go types may implement to provide an OpenAPI model name.
+// +// This takes precedence over OpenAPICanonicalTypeNamer, and should be used when a Go type has a model +// name that differs from its canonical type name as determined by Go package name reflection. +type OpenAPIModelNamer interface { + OpenAPIModelName() string +} + // GetCanonicalTypeName will find the canonical type name of a sample object, removing // the "vendor" part of the path func GetCanonicalTypeName(model interface{}) string { - if namer, ok := model.(OpenAPICanonicalTypeNamer); ok { + switch namer := model.(type) { + case OpenAPIModelNamer: + return namer.OpenAPIModelName() + case OpenAPICanonicalTypeNamer: return namer.OpenAPICanonicalTypeName() } t := reflect.TypeOf(model) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go index 97b2f989e..23109816e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go @@ -17,7 +17,6 @@ package strfmt import ( "encoding/base64" "encoding/json" - "fmt" "net/mail" "regexp" "strings" @@ -247,29 +246,6 @@ func (b *Base64) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (b *Base64) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - dbuf := make([]byte, base64.StdEncoding.DecodedLen(len(v))) - n, err := base64.StdEncoding.Decode(dbuf, v) - if err != nil { - return err - } - *b = dbuf[:n] - case string: - vv, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err - } - *b = Base64(vv) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Base64 from: %#v", v) - } - - return nil -} - func (b Base64) String() string { return base64.StdEncoding.EncodeToString([]byte(b)) } @@ -324,20 +300,6 @@ func (u *URI) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *URI) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = URI(string(v)) - case string: - *u = URI(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) - } - - return nil -} - func (u URI) String() string { return string(u) } @@ -388,20 +350,6 @@ func (e *Email) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (e *Email) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *e = Email(string(v)) - case string: - *e = Email(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Email from: %#v", v) - } - - return nil -} - func (e Email) String() string { return string(e) } @@ -452,20 +400,6 @@ func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (h *Hostname) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *h = Hostname(string(v)) - case string: - *h = Hostname(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Hostname from: %#v", v) - } - - return nil -} - func (h Hostname) String() string { return string(h) } @@ -516,20 +450,6 @@ func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *IPv4) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = IPv4(string(v)) - case string: - *u = IPv4(v) - default: - return fmt.Errorf("cannot 
sql.Scan() strfmt.IPv4 from: %#v", v) - } - - return nil -} - func (u IPv4) String() string { return string(u) } @@ -580,20 +500,6 @@ func (u *IPv6) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *IPv6) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = IPv6(string(v)) - case string: - *u = IPv6(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.IPv6 from: %#v", v) - } - - return nil -} - func (u IPv6) String() string { return string(u) } @@ -644,20 +550,6 @@ func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *CIDR) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = CIDR(string(v)) - case string: - *u = CIDR(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.CIDR from: %#v", v) - } - - return nil -} - func (u CIDR) String() string { return string(u) } @@ -708,20 +600,6 @@ func (u *MAC) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *MAC) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = MAC(string(v)) - case string: - *u = MAC(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.IPv4 from: %#v", v) - } - - return nil -} - func (u MAC) String() string { return string(u) } @@ -772,20 +650,6 @@ func (u *UUID) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *UUID) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID(string(v)) - case string: - *u = UUID(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID from: %#v", v) - } - - return nil -} - func (u UUID) String() string { return string(u) } @@ -839,20 +703,6 @@ func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID3) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID3(string(v)) - case string: - *u = UUID3(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID3 from: %#v", v) - } - - return nil -} - func (u UUID3) String() string { return string(u) } @@ -906,20 +756,6 @@ func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID4) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID4(string(v)) - case string: - *u = UUID4(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID4 from: %#v", v) - } - - return nil -} - func (u UUID4) String() string { return string(u) } @@ -973,20 +809,6 @@ func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID5) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID5(string(v)) - case string: - *u = UUID5(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID5 from: %#v", v) - } - - return nil -} - func (u UUID5) String() string { return string(u) } @@ -1040,20 +862,6 @@ func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *ISBN) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = 
ISBN(string(v)) - case string: - *u = ISBN(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN from: %#v", v) - } - - return nil -} - func (u ISBN) String() string { return string(u) } @@ -1107,20 +915,6 @@ func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (u *ISBN10) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = ISBN10(string(v)) - case string: - *u = ISBN10(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN10 from: %#v", v) - } - - return nil -} - func (u ISBN10) String() string { return string(u) } @@ -1174,20 +968,6 @@ func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (u *ISBN13) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = ISBN13(string(v)) - case string: - *u = ISBN13(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN13 from: %#v", v) - } - - return nil -} - func (u ISBN13) String() string { return string(u) } @@ -1241,20 +1021,6 @@ func (u *CreditCard) UnmarshalText(data []byte) error { // validation is perform return nil } -// Scan read a value from a database driver -func (u *CreditCard) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = CreditCard(string(v)) - case string: - *u = CreditCard(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.CreditCard from: %#v", v) - } - - return nil -} - func (u CreditCard) String() string { return string(u) } @@ -1308,20 +1074,6 @@ func (u *SSN) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *SSN) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = SSN(string(v)) - case string: - *u = SSN(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.SSN from: %#v", v) - } - - return nil -} - func (u SSN) String() string { return string(u) } @@ -1375,20 +1127,6 @@ func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (h *HexColor) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *h = HexColor(string(v)) - case string: - *h = HexColor(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.HexColor from: %#v", v) - } - - return nil -} - func (h HexColor) String() string { return string(h) } @@ -1442,20 +1180,6 @@ func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (r *RGBColor) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *r = RGBColor(string(v)) - case string: - *r = RGBColor(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.RGBColor from: %#v", v) - } - - return nil -} - func (r RGBColor) String() string { return string(r) } @@ -1510,20 +1234,6 @@ func (r *Password) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (r *Password) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *r = Password(string(v)) - case string: - *r = Password(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Password from: %#v", v) - } - - return nil -} - func (r Password) String() string { return string(r) } diff --git 
a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go index 8fbeb635f..04545296b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go @@ -119,23 +119,6 @@ func ParseDuration(cand string) (time.Duration, error) { return 0, fmt.Errorf("unable to parse %s as duration", cand) } -// Scan reads a Duration value from database driver type. -func (d *Duration) Scan(raw interface{}) error { - switch v := raw.(type) { - // TODO: case []byte: // ? - case int64: - *d = Duration(v) - case float64: - *d = Duration(int64(v)) - case nil: - *d = Duration(0) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Duration from: %#v", v) - } - - return nil -} - // String converts this duration to a string func (d Duration) String() string { return time.Duration(d).String() diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go index b2324db05..d0fd31a9d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go @@ -16,7 +16,6 @@ package strfmt import ( "encoding/json" - "fmt" "regexp" "strings" "time" @@ -114,25 +113,6 @@ func (t *DateTime) UnmarshalText(text []byte) error { return nil } -// Scan scans a DateTime value from database driver type. -func (t *DateTime) Scan(raw interface{}) error { - // TODO: case int64: and case float64: ? - switch v := raw.(type) { - case []byte: - return t.UnmarshalText(v) - case string: - return t.UnmarshalText([]byte(v)) - case time.Time: - *t = DateTime(v) - case nil: - *t = DateTime{} - default: - return fmt.Errorf("cannot sql.Scan() strfmt.DateTime from: %#v", v) - } - - return nil -} - // MarshalJSON returns the DateTime as JSON func (t DateTime) MarshalJSON() ([]byte, error) { return json.Marshal(time.Time(t).Format(MarshalFormat)) diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go index 7cb7795be..e5d508055 100644 --- a/vendor/k8s.io/utils/net/multi_listen.go +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "sync" + "sync/atomic" ) // connErrPair pairs conn and error which is returned by accept on sub-listeners. @@ -38,6 +39,7 @@ type multiListener struct { connCh chan connErrPair // stopCh communicates from parent to child listeners. stopCh chan struct{} + closed atomic.Bool } // compile time check to ensure *multiListener implements net.Listener @@ -150,10 +152,8 @@ func (ml *multiListener) Accept() (net.Conn, error) { // the go-routines to exit. func (ml *multiListener) Close() error { // Make sure this can be called repeatedly without explosions. - select { - case <-ml.stopCh: + if !ml.closed.CompareAndSwap(false, true) { return fmt.Errorf("use of closed network connection") - default: } // Tell all sub-listeners to stop. 
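The multi_listen.go hunk above replaces the select-on-stopCh check in Close with an atomic.Bool, so the "already closed" test and the flag update collapse into a single CompareAndSwap and repeated Close calls fail cleanly instead of racing on the channel. A minimal standalone sketch of that idempotent-close pattern (the closer type and its fields are illustrative, not code from this patch):

	package main

	import (
		"errors"
		"fmt"
		"sync/atomic"
	)

	// closer mirrors the relevant shape of multiListener: a stop channel
	// shared with worker goroutines plus an atomic flag recording whether
	// Close already ran.
	type closer struct {
		closed atomic.Bool
		stopCh chan struct{}
	}

	// Close is safe to call repeatedly: only the first caller wins the
	// CompareAndSwap and closes stopCh; later callers get an error rather
	// than panicking on a double close of the channel.
	func (c *closer) Close() error {
		if !c.closed.CompareAndSwap(false, true) {
			return errors.New("use of closed network connection")
		}
		close(c.stopCh)
		return nil
	}

	func main() {
		c := &closer{stopCh: make(chan struct{})}
		fmt.Println(c.Close()) // <nil>
		fmt.Println(c.Close()) // use of closed network connection
	}

Similarly, the k8s.io/klog/v2/textlogger package vendored earlier in this diff is configured entirely through its own Config rather than klog.InitFlags. A usage sketch, assuming the NewConfig, Verbosity, and Output option helpers from the package's options.go (vendored with the package but not shown in this hunk):

	package main

	import (
		"errors"
		"os"

		"k8s.io/klog/v2/textlogger"
	)

	func main() {
		// Verbosity can also be wired to command-line flags via
		// Config.AddFlags instead of being fixed at construction time.
		config := textlogger.NewConfig(
			textlogger.Verbosity(2),
			textlogger.Output(os.Stderr),
		)
		logger := textlogger.NewLogger(config)

		logger.Info("starting", "port", 8080)
		logger.V(2).Info("verbose detail", "attempt", 1)
		logger.WithName("sub").Error(errors.New("boom"), "request failed")
	}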
diff --git a/vendor/modules.txt b/vendor/modules.txt index ed132d3cc..8cbeb6336 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -86,11 +86,11 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/cenkalti/backoff/v5 v5.0.2 +# github.com/cenkalti/backoff/v5 v5.0.3 ## explicit; go 1.23 github.com/cenkalti/backoff/v5 -# github.com/cert-manager/cert-manager v1.18.2 -## explicit; go 1.23.0 +# github.com/cert-manager/cert-manager v1.19.2 +## explicit; go 1.25.0 github.com/cert-manager/cert-manager/pkg/apis/acme github.com/cert-manager/cert-manager/pkg/apis/acme/v1 github.com/cert-manager/cert-manager/pkg/apis/certmanager @@ -200,10 +200,29 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider +# github.com/cucumber/gherkin/go/v26 v26.2.0 +## explicit; go 1.19 +github.com/cucumber/gherkin/go/v26 +# github.com/cucumber/godog v0.15.1 +## explicit; go 1.16 +github.com/cucumber/godog +github.com/cucumber/godog/colors +github.com/cucumber/godog/formatters +github.com/cucumber/godog/internal/builder +github.com/cucumber/godog/internal/flags +github.com/cucumber/godog/internal/formatters +github.com/cucumber/godog/internal/models +github.com/cucumber/godog/internal/parser +github.com/cucumber/godog/internal/storage +github.com/cucumber/godog/internal/tags +github.com/cucumber/godog/internal/utils +# github.com/cucumber/messages/go/v21 v21.0.1 +## explicit; go 1.19 +github.com/cucumber/messages/go/v21 # github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 ## explicit github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer -# github.com/cyphar/filepath-securejoin v0.6.0 +# github.com/cyphar/filepath-securejoin v0.6.1 ## explicit; go 1.18 github.com/cyphar/filepath-securejoin github.com/cyphar/filepath-securejoin/internal/consts @@ -298,11 +317,11 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/jsonpointer v0.22.0 -## explicit; go 1.20 +# github.com/go-openapi/jsonpointer v0.22.1 +## explicit; go 1.24.0 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.21.1 -## explicit; go 1.20 +# github.com/go-openapi/jsonreference v0.21.2 +## explicit; go 1.24.0 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal # github.com/go-openapi/swag v0.24.1 @@ -317,8 +336,8 @@ github.com/go-openapi/swag/conv # github.com/go-openapi/swag/fileutils v0.24.0 ## explicit; go 1.20.0 github.com/go-openapi/swag/fileutils -# github.com/go-openapi/swag/jsonname v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/jsonname v0.25.1 +## explicit; go 1.24.0 github.com/go-openapi/swag/jsonname # github.com/go-openapi/swag/jsonutils v0.24.0 ## explicit; go 1.20 @@ -354,6 +373,9 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings +# github.com/gofrs/uuid v4.3.1+incompatible +## explicit +github.com/gofrs/uuid # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/proto @@ -467,7 +489,7 @@ github.com/gosuri/uitable/util/wordwrap # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 ## explicit github.com/gregjones/httpcache -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 +# 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -484,9 +506,18 @@ github.com/h2non/go-is-svg # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap +# github.com/hashicorp/go-immutable-radix v1.3.1 +## explicit +github.com/hashicorp/go-immutable-radix +# github.com/hashicorp/go-memdb v1.3.4 +## explicit; go 1.13 +github.com/hashicorp/go-memdb # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror +# github.com/hashicorp/golang-lru v0.5.4 +## explicit; go 1.12 +github.com/hashicorp/golang-lru/simplelru # github.com/huandu/xstrings v1.5.0 ## explicit; go 1.12 github.com/huandu/xstrings @@ -509,7 +540,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.18.1 +# github.com/klauspost/compress v1.18.2 ## explicit; go 1.23 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -637,10 +668,11 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runtime-spec v1.2.1 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/operator-framework/api v0.36.0 +# github.com/operator-framework/api v0.37.0 ## explicit; go 1.24.6 github.com/operator-framework/api/pkg/constraints github.com/operator-framework/api/pkg/encoding +github.com/operator-framework/api/pkg/lib/release github.com/operator-framework/api/pkg/lib/version github.com/operator-framework/api/pkg/manifests github.com/operator-framework/api/pkg/operators @@ -761,7 +793,7 @@ github.com/smallstep/pkcs7/internal/legacy/x509 # github.com/spf13/cast v1.7.1 ## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.10.1 +# github.com/spf13/cobra v1.10.2 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.10 @@ -847,11 +879,11 @@ go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.34.0 go.opentelemetry.io/otel/semconv/v1.37.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal @@ -971,7 +1003,7 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.45.0 +# golang.org/x/crypto v0.46.0 ## explicit; go 1.24.0 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -993,17 +1025,17 @@ golang.org/x/crypto/pbkdf2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b +# golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 ## explicit; go 1.23.0 golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.30.0 +# golang.org/x/mod v0.31.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.47.0 +# golang.org/x/net v0.48.0 ## 
explicit; go 1.24.0 golang.org/x/net/context golang.org/x/net/html @@ -1022,22 +1054,22 @@ golang.org/x/net/websocket ## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.18.0 +# golang.org/x/sync v0.19.0 ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.38.0 +# golang.org/x/sys v0.39.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.37.0 +# golang.org/x/term v0.38.0 ## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.31.0 +# golang.org/x/text v0.32.0 ## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/encoding @@ -1065,7 +1097,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.13.0 ## explicit; go 1.24.0 golang.org/x/time/rate -# golang.org/x/tools v0.39.0 +# golang.org/x/tools v0.40.0 ## explicit; go 1.24.0 golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/analysistest @@ -1240,7 +1272,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.19.3 +# helm.sh/helm/v3 v3.19.4 ## explicit; go 1.24.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -1279,7 +1311,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/time/ctime helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.34.1 => k8s.io/api v0.34.0 +# k8s.io/api v0.34.2 => k8s.io/api v0.34.0 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1341,7 +1373,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.34.1 => k8s.io/apiextensions-apiserver v0.34.0 +# k8s.io/apiextensions-apiserver v0.34.2 => k8s.io/apiextensions-apiserver v0.34.0 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1363,7 +1395,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.34.1 => k8s.io/apimachinery v0.34.0 +# k8s.io/apimachinery v0.34.2 => k8s.io/apimachinery v0.34.0 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1437,7 +1469,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.34.1 => k8s.io/apiserver v0.34.0 +# k8s.io/apiserver v0.34.2 => k8s.io/apiserver v0.34.0 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/apis/apiserver k8s.io/apiserver/pkg/apis/apiserver/install @@ -1486,13 +1518,13 @@ k8s.io/apiserver/pkg/warning k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.34.0 => k8s.io/cli-runtime v0.34.0 +# k8s.io/cli-runtime v0.34.2 => k8s.io/cli-runtime v0.34.0 ## explicit; go 1.24.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.34.1 => k8s.io/client-go 
v0.34.0 +# k8s.io/client-go v0.34.2 => k8s.io/client-go v0.34.0 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1856,7 +1888,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.34.1 => k8s.io/component-base v0.34.0 +# k8s.io/component-base v0.34.2 => k8s.io/component-base v0.34.0 ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility @@ -1884,8 +1916,10 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b -## explicit; go 1.23 +k8s.io/klog/v2/internal/verbosity +k8s.io/klog/v2/textlogger +# k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 +## explicit; go 1.23.0 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 @@ -1902,7 +1936,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kubectl v0.34.0 => k8s.io/kubectl v0.34.0 +# k8s.io/kubectl v0.34.2 => k8s.io/kubectl v0.34.0 ## explicit; go 1.24.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme @@ -1919,7 +1953,7 @@ k8s.io/kubernetes/pkg/apis/rbac/v1 k8s.io/kubernetes/pkg/registry/rbac k8s.io/kubernetes/pkg/registry/rbac/validation k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac -# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 +# k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -2059,10 +2093,10 @@ sigs.k8s.io/crdify/pkg/validations/property sigs.k8s.io/crdify/pkg/validators/crd sigs.k8s.io/crdify/pkg/validators/version/same sigs.k8s.io/crdify/pkg/validators/version/served -# sigs.k8s.io/gateway-api v1.1.0 -## explicit; go 1.22.0 +# sigs.k8s.io/gateway-api v1.4.0 +## explicit; go 1.24.0 sigs.k8s.io/gateway-api/apis/v1 -# sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 +# sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 ## explicit; go 1.23 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go new file mode 100644 index 000000000..5a755eb1b --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go @@ -0,0 +1,318 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:resource:categories=gateway-api,shortName=btlspolicy +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +// BackendTLSPolicy is a Direct Attached Policy. +// +kubebuilder:metadata:labels="gateway.networking.k8s.io/policy=Direct" + +// BackendTLSPolicy provides a way to configure how a Gateway +// connects to a Backend via TLS. +type BackendTLSPolicy struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of BackendTLSPolicy. + // +required + Spec BackendTLSPolicySpec `json:"spec"` + + // Status defines the current state of BackendTLSPolicy. + // +optional + Status PolicyStatus `json:"status,omitempty"` +} + +// BackendTLSPolicyList contains a list of BackendTLSPolicies +// +kubebuilder:object:root=true +type BackendTLSPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackendTLSPolicy `json:"items"` +} + +// BackendTLSPolicySpec defines the desired state of BackendTLSPolicy. +// +// Support: Extended +type BackendTLSPolicySpec struct { + // TargetRefs identifies an API object to apply the policy to. + // Only Services have Extended support. Implementations MAY support + // additional objects, with Implementation Specific support. + // Note that this config applies to the entire referenced resource + // by default, but this default may change in the future to provide + // a more granular application of the policy. + // + // TargetRefs must be _distinct_. This means either that: + // + // * They select different targets. If this is the case, then targetRef + // entries are distinct. In terms of fields, this means that the + // multi-part key defined by `group`, `kind`, and `name` must + // be unique across all targetRef entries in the BackendTLSPolicy. + // * They select different sectionNames in the same target. + // + // + // When more than one BackendTLSPolicy selects the same target and + // sectionName, implementations MUST determine precedence using the + // following criteria, continuing on ties: + // + // * The older policy by creation timestamp takes precedence. For + // example, a policy with a creation timestamp of "2021-07-15 + // 01:02:03" MUST be given precedence over a policy with a + // creation timestamp of "2021-07-15 01:02:04". + // * The policy appearing first in alphabetical order by {name}. + // For example, a policy named `bar` is given precedence over a + // policy named `baz`. + // + // For any BackendTLSPolicy that does not take precedence, the + // implementation MUST ensure the `Accepted` Condition is set to + // `status: False`, with Reason `Conflicted`. + // + // Support: Extended for Kubernetes Service + // + // Support: Implementation-specific for any other resource + // + // +required + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="sectionName must be specified when targetRefs includes 2 or more references to the same target",rule="self.all(p1, self.all(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name ? 
((!has(p1.sectionName) || p1.sectionName == '') == (!has(p2.sectionName) || p2.sectionName == '')) : true))" + // +kubebuilder:validation:XValidation:message="sectionName must be unique when targetRefs includes 2 or more references to the same target",rule="self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.sectionName) || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName == '')) || (has(p1.sectionName) && has(p2.sectionName) && p1.sectionName == p2.sectionName))))" + TargetRefs []LocalPolicyTargetReferenceWithSectionName `json:"targetRefs"` + + // Validation contains backend TLS validation configuration. + // +required + Validation BackendTLSPolicyValidation `json:"validation"` + + // Options are a list of key/value pairs to enable extended TLS + // configuration for each implementation. For example, configuring the + // minimum TLS version or supported cipher suites. + // + // A set of common keys MAY be defined by the API in the future. To avoid + // any ambiguity, implementation-specific definitions MUST use + // domain-prefixed names, such as `example.com/my-custom-option`. + // Un-prefixed names are reserved for key names defined by Gateway API. + // + // Support: Implementation-specific + // + // +optional + // +kubebuilder:validation:MaxProperties=16 + Options map[AnnotationKey]AnnotationValue `json:"options,omitempty"` +} + +// BackendTLSPolicyValidation contains backend TLS validation configuration. +// +kubebuilder:validation:XValidation:message="must not contain both CACertificateRefs and WellKnownCACertificates",rule="!(has(self.caCertificateRefs) && size(self.caCertificateRefs) > 0 && has(self.wellKnownCACertificates) && self.wellKnownCACertificates != \"\")" +// +kubebuilder:validation:XValidation:message="must specify either CACertificateRefs or WellKnownCACertificates",rule="(has(self.caCertificateRefs) && size(self.caCertificateRefs) > 0 || has(self.wellKnownCACertificates) && self.wellKnownCACertificates != \"\")" +type BackendTLSPolicyValidation struct { + // CACertificateRefs contains one or more references to Kubernetes objects that + // contain a PEM-encoded TLS CA certificate bundle, which is used to + // validate a TLS handshake between the Gateway and backend Pod. + // + // If CACertificateRefs is empty or unspecified, then WellKnownCACertificates must be + // specified. Only one of CACertificateRefs or WellKnownCACertificates may be specified, + // not both. If CACertificateRefs is empty or unspecified, the configuration for + // WellKnownCACertificates MUST be honored instead if supported by the implementation. + // + // A CACertificateRef is invalid if: + // + // * It refers to a resource that cannot be resolved (e.g., the referenced resource + // does not exist) or is misconfigured (e.g., a ConfigMap does not contain a key + // named `ca.crt`). In this case, the Reason must be set to `InvalidCACertificateRef` + // and the Message of the Condition must indicate which reference is invalid and why. + // + // * It refers to an unknown or unsupported kind of resource. In this case, the Reason + // must be set to `InvalidKind` and the Message of the Condition must explain which + // kind of resource is unknown or unsupported. + // + // * It refers to a resource in another namespace. This may change in future + // spec updates. + // + // Implementations MAY choose to perform further validation of the certificate + // content (e.g., checking expiry or enforcing specific formats). 
In such cases, + // an implementation-specific Reason and Message must be set for the invalid reference. + // + // In all cases, the implementation MUST ensure the `ResolvedRefs` Condition on + // the BackendTLSPolicy is set to `status: False`, with a Reason and Message + // that indicate the cause of the error. Connections using an invalid + // CACertificateRef MUST fail, and the client MUST receive an HTTP 5xx error + // response. If ALL CACertificateRefs are invalid, the implementation MUST also + // ensure the `Accepted` Condition on the BackendTLSPolicy is set to + // `status: False`, with a Reason `NoValidCACertificate`. + // + // + // A single CACertificateRef to a Kubernetes ConfigMap kind has "Core" support. + // Implementations MAY choose to support attaching multiple certificates to + // a backend, but this behavior is implementation-specific. + // + // Support: Core - An optional single reference to a Kubernetes ConfigMap, + // with the CA certificate in a key named `ca.crt`. + // + // Support: Implementation-specific - More than one reference, other kinds + // of resources, or a single reference that includes multiple certificates. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=8 + CACertificateRefs []LocalObjectReference `json:"caCertificateRefs,omitempty"` + + // WellKnownCACertificates specifies whether system CA certificates may be used in + // the TLS handshake between the gateway and backend pod. + // + // If WellKnownCACertificates is unspecified or empty (""), then CACertificateRefs + // must be specified with at least one entry for a valid configuration. Only one of + // CACertificateRefs or WellKnownCACertificates may be specified, not both. + // If an implementation does not support the WellKnownCACertificates field, or + // the supplied value is not recognized, the implementation MUST ensure the + // `Accepted` Condition on the BackendTLSPolicy is set to `status: False`, with + // a Reason `Invalid`. + // + // Support: Implementation-specific + // + // +optional + // +listType=atomic + WellKnownCACertificates *WellKnownCACertificatesType `json:"wellKnownCACertificates,omitempty"` + + // Hostname is used for the following purposes in the connection between Gateways and + // backends: + // + // 1. Hostname MUST be used as the SNI to connect to the backend (RFC 6066). + // 2. Hostname MUST be used for authentication and MUST match the certificate + // served by the matching backend, unless SubjectAltNames is specified. + // 3. If SubjectAltNames are specified, Hostname can be used for certificate selection + // but MUST NOT be used for authentication. If you want to use the value + // of the Hostname field for authentication, you MUST add it to the SubjectAltNames list. + // + // Support: Core + // + // +required + Hostname PreciseHostname `json:"hostname"` + + // SubjectAltNames contains one or more Subject Alternative Names. + // When specified, the certificate served from the backend MUST + // have at least one Subject Alternative Name matching one of the specified SubjectAltNames. + // + // Support: Extended + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=5 + SubjectAltNames []SubjectAltName `json:"subjectAltNames,omitempty"` +} + +// SubjectAltName represents Subject Alternative Name.
+// +kubebuilder:validation:XValidation:message="SubjectAltName element must contain Hostname, if Type is set to Hostname",rule="!(self.type == \"Hostname\" && (!has(self.hostname) || self.hostname == \"\"))" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must not contain Hostname, if Type is not set to Hostname",rule="!(self.type != \"Hostname\" && has(self.hostname) && self.hostname != \"\")" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must contain URI, if Type is set to URI",rule="!(self.type == \"URI\" && (!has(self.uri) || self.uri == \"\"))" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must not contain URI, if Type is not set to URI",rule="!(self.type != \"URI\" && has(self.uri) && self.uri != \"\")" +type SubjectAltName struct { + // Type determines the format of the Subject Alternative Name. Always required. + // + // Support: Core + // + // +required + Type SubjectAltNameType `json:"type"` + + // Hostname contains Subject Alternative Name specified in DNS name format. + // Required when Type is set to Hostname, ignored otherwise. + // + // Support: Core + // + // +optional + Hostname Hostname `json:"hostname,omitempty"` + + // URI contains Subject Alternative Name specified in a full URI format. + // It MUST include both a scheme (e.g., "http" or "ftp") and a scheme-specific-part. + // Common values include SPIFFE IDs like "spiffe://mycluster.example.com/ns/myns/sa/svc1sa". + // Required when Type is set to URI, ignored otherwise. + // + // Support: Core + // + // +optional + URI AbsoluteURI `json:"uri,omitempty"` +} + +// WellKnownCACertificatesType is the type of CA certificate that will be used +// when the caCertificateRefs field is unspecified. +// +kubebuilder:validation:Enum=System +type WellKnownCACertificatesType string + +const ( + // WellKnownCACertificatesSystem indicates that well known system CA certificates should be used. + WellKnownCACertificatesSystem WellKnownCACertificatesType = "System" +) + +// SubjectAltNameType is the type of the Subject Alternative Name. +// +kubebuilder:validation:Enum=Hostname;URI +type SubjectAltNameType string + +const ( + // HostnameSubjectAltNameType specifies hostname-based SAN. + // + // Support: Core + HostnameSubjectAltNameType SubjectAltNameType = "Hostname" + + // URISubjectAltNameType specifies URI-based SAN, e.g. SPIFFE id. + // + // Support: Core + URISubjectAltNameType SubjectAltNameType = "URI" +) + +const ( + // This reason is used with the "Accepted" condition when it is + // set to false because all CACertificateRefs of the + // BackendTLSPolicy are invalid. + BackendTLSPolicyReasonNoValidCACertificate PolicyConditionReason = "NoValidCACertificate" +) + +const ( + // This condition indicates whether the controller was able to resolve all + // object references for the BackendTLSPolicy. + // + // Possible reasons for this condition to be True are: + // + // * "ResolvedRefs" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidCACertificateRef" + // * "InvalidKind" + // + // Controllers may raise this condition with other reasons, but should + // prefer to use the reasons listed above to improve interoperability. + BackendTLSPolicyConditionResolvedRefs PolicyConditionType = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the condition + // is true. 
+ BackendTLSPolicyReasonResolvedRefs PolicyConditionReason = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when one of the + // BackendTLSPolicy's CACertificateRefs is invalid. + // A CACertificateRef is considered invalid when it refers to a nonexistent + // resource or when the data within that resource is malformed. + BackendTLSPolicyReasonInvalidCACertificateRef PolicyConditionReason = "InvalidCACertificateRef" + + // This reason is used with the "ResolvedRefs" condition when one of the + // BackendTLSPolicy's CACertificateRefs references an unknown or unsupported + // Group and/or Kind. + BackendTLSPolicyReasonInvalidKind PolicyConditionReason = "InvalidKind" +) diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go index caa5e96bf..58d975186 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go @@ -33,15 +33,18 @@ import ( // Gateway represents an instance of a service-traffic handling infrastructure // by binding Listeners to a set of IP addresses. type Gateway struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired state of Gateway. + // +required Spec GatewaySpec `json:"spec"` // Status defines the current state of Gateway. // // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type: "Programmed", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + // +optional Status GatewayStatus `json:"status,omitempty"` } @@ -63,12 +66,15 @@ type GatewayList struct { type GatewaySpec struct { // GatewayClassName used for this Gateway. This is the name of a // GatewayClass resource. + // +required GatewayClassName ObjectName `json:"gatewayClassName"` // Listeners associated with this Gateway. Listeners define // logical endpoints that are bound on this Gateway's addresses. // At least one Listener MUST be specified. // + // ## Distinct Listeners + // // Each Listener in a set of Listeners (for example, in a single Gateway) // MUST be _distinct_, in that a traffic flow MUST be able to be assigned to // exactly one listener. (This section uses "set of Listeners" rather than @@ -80,55 +86,76 @@ type GatewaySpec struct { // combination of Port, Protocol, and, if supported by the protocol, Hostname. // // Some combinations of port, protocol, and TLS settings are considered - // Core support and MUST be supported by implementations based on their - // targeted conformance profile: + // Core support and MUST be supported by implementations based on the objects + // they support: // - // HTTP Profile + // HTTPRoute // // 1. HTTPRoute, Port: 80, Protocol: HTTP // 2. HTTPRoute, Port: 443, Protocol: HTTPS, TLS Mode: Terminate, TLS keypair provided // - // TLS Profile + // TLSRoute // // 1. TLSRoute, Port: 443, Protocol: TLS, TLS Mode: Passthrough // // "Distinct" Listeners have the following property: // - // The implementation can match inbound requests to a single distinct - // Listener. When multiple Listeners share values for fields (for + // **The implementation can match inbound requests to a single distinct + // Listener**. 
+ // + // When multiple Listeners share values for fields (for // example, two Listeners with the same Port value), the implementation // can match requests to only one of the Listeners using other // Listener fields. // - // For example, the following Listener scenarios are distinct: + // When multiple listeners have the same value for the Protocol field, then + // each of the Listeners with matching Protocol values MUST have different + // values for other fields. + // + // The set of fields that MUST be different for a Listener differs per protocol. + // The following rules define which fields MUST be considered for + // Listeners to be distinct with each protocol currently defined in the + // Gateway API spec. + // + // The set of listeners that all share a protocol value MUST have _different_ + // values for _at least one_ of these fields to be distinct: // - // 1. Multiple Listeners with the same Port that all use the "HTTP" - // Protocol that all have unique Hostname values. - // 2. Multiple Listeners with the same Port that use either the "HTTPS" or - // "TLS" Protocol that all have unique Hostname values. - // 3. A mixture of "TCP" and "UDP" Protocol Listeners, where no Listener - // with the same Protocol has the same Port value. + // * **HTTP, HTTPS, TLS**: Port, Hostname + // * **TCP, UDP**: Port // - // Some fields in the Listener struct have possible values that affect - // whether the Listener is distinct. Hostname is particularly relevant - // for HTTP or HTTPS protocols. + // One **very** important rule to call out involves what happens when an + // implementation: // - // When using the Hostname value to select between same-Port, same-Protocol - // Listeners, the Hostname value must be different on each Listener for the - // Listener to be distinct. + // * Supports TCP protocol Listeners, as well as HTTP, HTTPS, or TLS protocol + // Listeners, and + // * sees HTTP, HTTPS, or TLS protocols with the same `port` as one with TCP + // Protocol. // - // When the Listeners are distinct based on Hostname, inbound request + // In this case, all the Listeners that share a port with the + // TCP Listener are not distinct and so MUST NOT be accepted. + // + // If an implementation does not support TCP Protocol Listeners, then the + // previous rule does not apply, and the TCP Listeners SHOULD NOT be + // accepted. + // + // Note that the `tls` field is not used for determining if a listener is distinct, because + // Listeners that _only_ differ on TLS config will still conflict in all cases. + // + // ### Listeners that are distinct only by Hostname + // + // When the Listeners are distinct based only on Hostname, inbound request // hostnames MUST match from the most specific to least specific Hostname // values to choose the correct Listener and its associated set of Routes. // - // Exact matches must be processed before wildcard matches, and wildcard - // matches must be processed before fallback (empty Hostname value) + // Exact matches MUST be processed before wildcard matches, and wildcard + // matches MUST be processed before fallback (empty Hostname value) // matches. For example, `"foo.example.com"` takes precedence over // `"*.example.com"`, and `"*.example.com"` takes precedence over `""`. // // Additionally, if there are multiple wildcard entries, more specific // wildcard entries must be processed before less specific wildcard entries. // For example, `"*.foo.example.com"` takes precedence over `"*.example.com"`.
+ // // The precise definition here is that the higher the number of dots in the // hostname to the right of the wildcard character, the higher the precedence. // @@ -136,18 +163,26 @@ type GatewaySpec struct { // the left, however, so `"*.example.com"` will match both // `"foo.bar.example.com"` _and_ `"bar.example.com"`. // + // ## Handling indistinct Listeners + // // If a set of Listeners contains Listeners that are not distinct, then those - // Listeners are Conflicted, and the implementation MUST set the "Conflicted" + // Listeners are _Conflicted_, and the implementation MUST set the "Conflicted" // condition in the Listener Status to "True". // + // The words "indistinct" and "conflicted" are considered equivalent for the + // purpose of this documentation. + // // Implementations MAY choose to accept a Gateway with some Conflicted // Listeners only if they only accept the partial Listener set that contains - // no Conflicted Listeners. To put this another way, implementations may - // accept a partial Listener set only if they throw out *all* the conflicting - // Listeners. No picking one of the conflicting listeners as the winner. - // This also means that the Gateway must have at least one non-conflicting - // Listener in this case, otherwise it violates the requirement that at - // least one Listener must be present. + // no Conflicted Listeners. + // + // Specifically, an implementation MAY accept a partial Listener set subject to + // the following rules: + // + // * The implementation MUST NOT pick one conflicting Listener as the winner. + // ALL indistinct Listeners must not be accepted for processing. + // * At least one distinct Listener MUST be present, or else the Gateway effectively + // contains _no_ Listeners, and must be rejected from processing as a whole. // // The implementation MUST set a "ListenersNotValid" condition on the // Gateway Status when the Gateway contains Conflicted Listeners whether or @@ -156,7 +191,25 @@ type GatewaySpec struct { // Accepted. Additionally, the Listener status for those listeners SHOULD // indicate which Listeners are conflicted and not Accepted. // - // A Gateway's Listeners are considered "compatible" if: + // ## General Listener behavior + // + // Note that, for all distinct Listeners, requests SHOULD match at most one Listener. + // For example, if Listeners are defined for "foo.example.com" and "*.example.com", a + // request to "foo.example.com" SHOULD only be routed using routes attached + // to the "foo.example.com" Listener (and not the "*.example.com" Listener). + // + // This concept is known as "Listener Isolation", and it is an Extended feature + // of Gateway API. Implementations that do not support Listener Isolation MUST + // clearly document this, and MUST NOT claim support for the + // `GatewayHTTPListenerIsolation` feature. + // + // Implementations that _do_ support Listener Isolation SHOULD claim support + // for the Extended `GatewayHTTPListenerIsolation` feature and pass the associated + // conformance tests. + // + // ## Compatible Listeners + // + // A Gateway's Listeners are considered _compatible_ if: // // 1. They are distinct. // 2. The implementation can serve them in compliance with the Addresses @@ -171,16 +224,11 @@ type GatewaySpec struct { // on the same address, or cannot mix HTTPS and generic TLS listens on the same port // would not consider those cases compatible, even though they are distinct. // - // Note that requests SHOULD match at most one Listener. 
For example, if - // Listeners are defined for "foo.example.com" and "*.example.com", a - // request to "foo.example.com" SHOULD only be routed using routes attached - // to the "foo.example.com" Listener (and not the "*.example.com" Listener). - // This concept is known as "Listener Isolation". Implementations that do - // not support Listener Isolation MUST clearly document this. - // // Implementations MAY merge separate Gateways onto a single set of // Addresses if all Listeners across all Gateways are compatible. // + // In a future release the MinItems=1 requirement MAY be dropped. + // // Support: Core // // +listType=map @@ -192,12 +240,13 @@ type GatewaySpec struct { // +kubebuilder:validation:XValidation:message="hostname must not be specified for protocols ['TCP', 'UDP']",rule="self.all(l, l.protocol in ['TCP', 'UDP'] ? (!has(l.hostname) || l.hostname == '') : true)" // +kubebuilder:validation:XValidation:message="Listener name must be unique within the Gateway",rule="self.all(l1, self.exists_one(l2, l1.name == l2.name))" // +kubebuilder:validation:XValidation:message="Combination of port, protocol and hostname must be unique for each listener",rule="self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))" + // +required Listeners []Listener `json:"listeners"` // Addresses requested for this Gateway. This is optional and behavior can // depend on the implementation. If a value is set in the spec and the // requested address is invalid or unavailable, the implementation MUST - // indicate this in the associated entry in GatewayStatus.Addresses. + // indicate this in an associated entry in GatewayStatus.Conditions. // // The Addresses field represents a request for the address(es) on the // "outside of the Gateway", that traffic bound for this Gateway will use. @@ -216,19 +265,92 @@ type GatewaySpec struct { // Support: Extended // // +optional + // +listType=atomic // // +kubebuilder:validation:MaxItems=16 - // +kubebuilder:validation:XValidation:message="IPAddress values must be unique",rule="self.all(a1, a1.type == 'IPAddress' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )" - // +kubebuilder:validation:XValidation:message="Hostname values must be unique",rule="self.all(a1, a1.type == 'Hostname' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )" - Addresses []GatewayAddress `json:"addresses,omitempty"` + // +kubebuilder:validation:XValidation:message="IPAddress values must be unique",rule="self.all(a1, a1.type == 'IPAddress' && has(a1.value) ? self.exists_one(a2, a2.type == a1.type && has(a2.value) && a2.value == a1.value) : true )" + // +kubebuilder:validation:XValidation:message="Hostname values must be unique",rule="self.all(a1, a1.type == 'Hostname' && has(a1.value) ? self.exists_one(a2, a2.type == a1.type && has(a2.value) && a2.value == a1.value) : true )" + Addresses []GatewaySpecAddress `json:"addresses,omitempty"` // Infrastructure defines infrastructure level attributes about this Gateway instance. // - // Support: Core + // Support: Extended // - // // +optional Infrastructure *GatewayInfrastructure `json:"infrastructure,omitempty"` + + // AllowedListeners defines which ListenerSets can be attached to this Gateway. + // While this feature is experimental, the default value is to allow no ListenerSets. 
+ // + // + // + // +optional + AllowedListeners *AllowedListeners `json:"allowedListeners,omitempty"` + // + // TLS specifies frontend and backend TLS configuration for the entire gateway. + // + // Support: Extended + // + // +optional + // + TLS *GatewayTLSConfig `json:"tls,omitempty"` + + // DefaultScope, when set, configures the Gateway as a default Gateway, + // meaning it will dynamically and implicitly have Routes (e.g. HTTPRoute) + // attached to it, according to the scope configured here. + // + // If unset (the default) or set to None, the Gateway will not act as a + // default Gateway; if set, the Gateway will claim any Route with a + // matching scope set in its UseDefaultGateway field, subject to the usual + // rules about which routes the Gateway can attach to. + // + // Think carefully before using this functionality! While the normal rules + // about which Route can apply are still enforced, it is simply easier for + // the wrong Route to be accidentally attached to this Gateway in this + // configuration. If the Gateway operator is not also the operator in + // control of the scope (e.g. namespace) with tight controls and checks on + // what kind of workloads and Routes get added in that scope, we strongly + // recommend not using this just because it seems convenient, and instead + // sticking to direct Route attachment. + // + // +optional + // + DefaultScope GatewayDefaultScope `json:"defaultScope,omitempty"` +} + +// AllowedListeners defines which ListenerSets can be attached to this Gateway. +type AllowedListeners struct { + // Namespaces defines which namespaces ListenerSets can be attached to this Gateway. + // While this feature is experimental, the default value is to allow no ListenerSets. + // + // +optional + // +kubebuilder:default={from: None} + Namespaces *ListenerNamespaces `json:"namespaces,omitempty"` +} + +// ListenerNamespaces indicates which namespaces ListenerSets should be selected from. +type ListenerNamespaces struct { + // From indicates where ListenerSets can attach to this Gateway. Possible + // values are: + // + // * Same: Only ListenerSets in the same namespace may be attached to this Gateway. + // * Selector: ListenerSets in namespaces selected by the selector may be attached to this Gateway. + // * All: ListenerSets in all namespaces may be attached to this Gateway. + // * None: Only listeners defined in the Gateway's spec are allowed. + // + // While this feature is experimental, the default value is None. + // + // +optional + // +kubebuilder:default=None + // +kubebuilder:validation:Enum=All;Selector;Same;None + From *FromNamespaces `json:"from,omitempty"` + + // Selector must be specified when From is set to "Selector". In that case, + // only ListenerSets in Namespaces matching this Selector will be selected by this + // Gateway. This field is ignored for other values of "From". + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` } // Listener embodies the concept of a logical endpoint where a Gateway accepts @@ -238,6 +360,7 @@ type Listener struct { // Gateway. // // Support: Core + // +required Name SectionName `json:"name"` // Hostname specifies the virtual hostname to match for protocol types that @@ -250,10 +373,31 @@ type Listener struct { // // * TLS: The Listener Hostname MUST match the SNI. // * HTTP: The Listener Hostname MUST match the Host header of the request. - // * HTTPS: The Listener Hostname SHOULD match at both the TLS and HTTP - // protocol layers as described above.
If an implementation does not - // ensure that both the SNI and Host header match the Listener hostname, - // it MUST clearly document that. + // * HTTPS: The Listener Hostname SHOULD match both the SNI and Host header. + // Note that this does not require the SNI and Host header to be the same. + // The semantics of this are described in more detail below. + // + // To ensure security, Section 11.1 of RFC-6066 emphasizes that server + // implementations that rely on SNI hostname matching MUST also verify + // hostnames within the application protocol. + // + // Section 9.1.2 of RFC-7540 provides a mechanism for servers to reject the + // reuse of a connection by responding with the HTTP 421 Misdirected Request + // status code. This indicates that the origin server has rejected the + // request because it appears to have been misdirected. + // + // To detect misdirected requests, Gateways SHOULD match the authority of + // the requests with all the SNI hostname(s) configured across all the + // Gateway Listeners on the same port and protocol: + // + // * If another Listener has an exact match or more specific wildcard entry, + // the Gateway SHOULD return a 421. + // * If the current Listener (selected by SNI matching during ClientHello) + // does not match the Host: + // * If another Listener does match the Host the Gateway SHOULD return a + // 421. + // * If no other Listener matches the Host, the Gateway MUST return a + // 404. // // For HTTPRoute and TLSRoute resources, there is an interaction with the // `spec.hostnames` array. When both listener and route specify hostnames, @@ -274,18 +418,24 @@ type Listener struct { // same port, subject to the Listener compatibility rules. // // Support: Core + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // + // +required Port PortNumber `json:"port"` // Protocol specifies the network protocol this listener expects to receive. // // Support: Core + // +required Protocol ProtocolType `json:"protocol"` // TLS is the TLS configuration for the Listener. This field is required if // the Protocol field is "HTTPS" or "TLS". It is invalid to set this field // if the Protocol field is "HTTP", "TCP", or "UDP". // - // The association of SNIs to Certificate defined in GatewayTLSConfig is + // The association of SNIs to Certificate defined in ListenerTLSConfig is // defined based on the Hostname field for this listener. // // The GatewayClass MUST use the longest matching SNI out of all @@ -294,7 +444,7 @@ type Listener struct { // Support: Core // // +optional - TLS *GatewayTLSConfig `json:"tls,omitempty"` + TLS *ListenerTLSConfig `json:"tls,omitempty"` // AllowedRoutes defines the types of routes that MAY be attached to a // Listener and the trusted namespaces where those Route resources MAY be @@ -351,7 +501,7 @@ type Listener struct { // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 -// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([-a-zSA-Z0-9]*[a-zA-Z0-9])?$|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9]+$` +// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])?$|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9]+$` type ProtocolType string const ( @@ -374,10 +524,31 @@ const ( UDPProtocolType ProtocolType = "UDP" ) -// GatewayTLSConfig describes a TLS configuration. +// GatewayBackendTLS describes backend TLS configuration for gateway. 
+type GatewayBackendTLS struct { + // ClientCertificateRef is a reference to an object that contains a Client + // Certificate and the associated private key. + // + // References to a resource in a different namespace are invalid UNLESS there + // is a ReferenceGrant in the target namespace that allows the certificate + // to be attached. If a ReferenceGrant does not allow this reference, the + // "ResolvedRefs" condition MUST be set to False for this listener with the + // "RefNotPermitted" reason. + // + // ClientCertificateRef can reference standard Kubernetes resources, e.g. + // Secret, or implementation-specific custom resources. + // + // Support: Core + // + // +optional + // + ClientCertificateRef *SecretObjectReference `json:"clientCertificateRef,omitempty"` +} + +// ListenerTLSConfig describes a TLS configuration for a listener. // // +kubebuilder:validation:XValidation:message="certificateRefs or options must be specified when mode is Terminate",rule="self.mode == 'Terminate' ? size(self.certificateRefs) > 0 || size(self.options) > 0 : true" -type GatewayTLSConfig struct { +type ListenerTLSConfig struct { // Mode defines the TLS behavior for the TLS session initiated by the client. // There are two possible modes: // @@ -422,21 +593,10 @@ type GatewayTLSConfig struct { // Support: Implementation-specific (More than one reference or other resource types) // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=64 CertificateRefs []SecretObjectReference `json:"certificateRefs,omitempty"` - // FrontendValidation holds configuration information for validating the frontend (client). - // Setting this field will require clients to send a client certificate - // required for validation during the TLS handshake. In browsers this may result in a dialog appearing - // that requests a user to specify the client certificate. - // The maximum depth of a certificate chain accepted in verification is Implementation specific. - // - // Support: Extended - // - // +optional - // - FrontendValidation *FrontendTLSValidation `json:"frontendValidation,omitempty"` - // Options are a list of key/value pairs to enable extended TLS // configuration for each implementation. For example, configuring the // minimum TLS version or supported cipher suites. // @@ -453,6 +613,58 @@ type GatewayTLSConfig struct { Options map[AnnotationKey]AnnotationValue `json:"options,omitempty"` } +// GatewayTLSConfig specifies the frontend and backend TLS configuration for the gateway. +type GatewayTLSConfig struct { + // Backend describes TLS configuration for the gateway when connecting + // to backends. + // + // Note that this contains only details for the Gateway as a TLS client, + // and does _not_ imply behavior about how to choose which backend should + // get a TLS connection. That is determined by the presence of a BackendTLSPolicy. + // + // Support: Core + // + // +optional + // + Backend *GatewayBackendTLS `json:"backend,omitempty"` + + // Frontend describes TLS configuration when a client connects to the Gateway. + // + // Support: Core + // + // +optional + // + Frontend *FrontendTLSConfig `json:"frontend,omitempty"` +} + +// FrontendTLSConfig specifies the frontend TLS configuration for the gateway. +type FrontendTLSConfig struct { + // Default specifies the default client certificate validation configuration + // for all Listeners handling HTTPS traffic, unless a per-port configuration + // is defined.
+ // + // Support: Core + // + // +required + // + Default TLSConfig `json:"default"` + + // PerPort specifies TLS configuration assigned per port. + // Per port configuration is optional. Once set, this configuration overrides + // the default configuration for all Listeners handling HTTPS traffic + // that match this port. + // Each override port requires a unique TLS configuration. + // + // Support: Core + // + // +optional + // +listType=map + // +listMapKey=port + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:XValidation:message="Port for TLS configuration must be unique within the Gateway",rule="self.all(t1, self.exists_one(t2, t1.port == t2.port))" + // + PerPort []TLSPortConfig `json:"perPort,omitempty"` +} + // TLSModeType type defines how a Gateway handles TLS sessions. // // +kubebuilder:validation:Enum=Terminate;Passthrough @@ -471,6 +683,46 @@ const ( TLSModePassthrough TLSModeType = "Passthrough" ) +// TLSConfig describes TLS configuration that can apply to multiple Listeners +// within this Gateway. Currently, it stores only the client certificate validation +// configuration, but this may be extended in the future. +type TLSConfig struct { + // Validation holds configuration information for validating the frontend (client). + // Setting this field will result in mutual authentication when connecting to the gateway. + // In browsers this may result in a dialog appearing + // that requests a user to specify the client certificate. + // The maximum depth of a certificate chain accepted in verification is implementation-specific. + // + // Support: Core + // + // +optional + // + Validation *FrontendTLSValidation `json:"validation,omitempty"` +} + +type TLSPortConfig struct { + // The Port indicates the port number to which the TLS configuration will be + // applied. This configuration will be applied to all Listeners handling HTTPS + // traffic that match this port. + // + // Support: Core + // + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // + Port PortNumber `json:"port"` + + // TLS stores the configuration that will be applied to all Listeners handling + // HTTPS traffic and matching the given port. + // + // Support: Core + // + // +required + // + TLS TLSConfig `json:"tls"` +} + // FrontendTLSValidation holds configuration information that can be used to validate // the frontend initiating the TLS connection type FrontendTLSValidation struct { @@ -487,8 +739,8 @@ type FrontendTLSValidation struct { // Support: Core - A single reference to a Kubernetes ConfigMap // with the CA certificate in a key named `ca.crt`. // - // Support: Implementation-specific (More than one reference, or other kinds - // of resources). + // Support: Implementation-specific (More than one certificate in a ConfigMap + // with different keys or more than one reference, or other kinds of resources). // // References to a resource in a different namespace are invalid UNLESS there // is a ReferenceGrant in the target namespace that allows the certificate // to be attached. If a ReferenceGrant does not allow this reference, the // "ResolvedRefs" condition MUST be set to False for this listener with the // "RefNotPermitted" reason.
// + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 - CACertificateRefs []ObjectReference `json:"caCertificateRefs,omitempty"` + CACertificateRefs []ObjectReference `json:"caCertificateRefs"` + + // FrontendValidationMode defines the mode for validating the client certificate. + // There are two possible modes: + // + // - AllowValidOnly: In this mode, the gateway will accept connections only if + // the client presents a valid certificate. This certificate must successfully + // pass validation against the CA certificates specified in `CACertificateRefs`. + // - AllowInsecureFallback: In this mode, the gateway will accept connections + // even if the client certificate is not presented or fails verification. + // + // This approach delegates client authorization to the backend and introduces + // a significant security risk. It should be used only in testing environments, or + // on a temporary basis in non-testing environments. + // + // Defaults to AllowValidOnly. + // + // Support: Core + // + // +optional + // +kubebuilder:default=AllowValidOnly + Mode FrontendValidationModeType `json:"mode,omitempty"` } +// FrontendValidationModeType type defines how a Gateway validates client certificates. +// +// +kubebuilder:validation:Enum=AllowValidOnly;AllowInsecureFallback +type FrontendValidationModeType string + +const ( + // AllowValidOnly indicates that a client certificate is required + // during the TLS handshake and MUST pass validation. + // + // Support: Core + AllowValidOnly FrontendValidationModeType = "AllowValidOnly" + + // AllowInsecureFallback indicates that a client certificate may not be + // presented during the handshake or the validation against CA certificates may fail. + // + // Support: Extended + AllowInsecureFallback FrontendValidationModeType = "AllowInsecureFallback" +) + // AllowedRoutes defines which Routes may be attached to this Listener. type AllowedRoutes struct { // Namespaces indicates namespaces from which Routes may be attached to this @@ -509,6 +803,7 @@ type AllowedRoutes struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:default={from: Same} Namespaces *RouteNamespaces `json:"namespaces,omitempty"` @@ -525,25 +820,26 @@ type AllowedRoutes struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=8 Kinds []RouteGroupKind `json:"kinds,omitempty"` } -// FromNamespaces specifies namespace from which Routes may be attached to a +// FromNamespaces specifies namespaces from which Routes/ListenerSets may be attached to a // Gateway. -// -// +kubebuilder:validation:Enum=All;Selector;Same type FromNamespaces string const ( - // Routes in all namespaces may be attached to this Gateway. + // Routes/ListenerSets in all namespaces may be attached to this Gateway. NamespacesFromAll FromNamespaces = "All" - // Only Routes in namespaces selected by the selector may be attached to + // Only Routes/ListenerSets in namespaces selected by the selector may be attached to // this Gateway. NamespacesFromSelector FromNamespaces = "Selector" - // Only Routes in the same namespace as the Gateway may be attached to this + // Only Routes/ListenerSets in the same namespace as the Gateway may be attached to this // Gateway. NamespacesFromSame FromNamespaces = "Same" + // No Routes/ListenerSets may be attached to this Gateway. + NamespacesFromNone FromNamespaces = "None" ) // RouteNamespaces indicate which namespaces Routes should be selected from.
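To make the frontend TLS surface above concrete, here is a minimal sketch of how these types could be populated, assuming they compile as defined in this diff; the ConfigMap name "frontend-ca" and port 8443 are hypothetical:

package example

import gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

// frontendTLS builds a GatewayTLSConfig whose Default validation applies to
// every HTTPS Listener, while port 8443 is overridden to tolerate missing or
// invalid client certificates (the insecure mode documented above).
func frontendTLS() *gatewayv1.GatewayTLSConfig {
	// Hypothetical ConfigMap holding the CA bundle in a `ca.crt` key.
	caRef := gatewayv1.ObjectReference{Kind: "ConfigMap", Name: "frontend-ca"}
	return &gatewayv1.GatewayTLSConfig{
		Frontend: &gatewayv1.FrontendTLSConfig{
			Default: gatewayv1.TLSConfig{
				Validation: &gatewayv1.FrontendTLSValidation{
					CACertificateRefs: []gatewayv1.ObjectReference{caRef},
					Mode:              gatewayv1.AllowValidOnly,
				},
			},
			// PerPort entries must use unique ports (enforced by the CEL rule above).
			PerPort: []gatewayv1.TLSPortConfig{{
				Port: 8443,
				TLS: gatewayv1.TLSConfig{
					Validation: &gatewayv1.FrontendTLSValidation{
						CACertificateRefs: []gatewayv1.ObjectReference{caRef},
						Mode:              gatewayv1.AllowInsecureFallback,
					},
				},
			}},
		},
	}
}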
@@ -560,6 +856,7 @@ type RouteNamespaces struct { // // +optional // +kubebuilder:default=Same + // +kubebuilder:validation:Enum=All;Selector;Same From *FromNamespaces `json:"from,omitempty"` // Selector must be specified when From is set to "Selector". In that case, @@ -581,27 +878,31 @@ type RouteGroupKind struct { Group *Group `json:"group,omitempty"` // Kind is the kind of the Route. + // +required Kind Kind `json:"kind"` } -// GatewayAddress describes an address that can be bound to a Gateway. +// GatewaySpecAddress describes an address that can be bound to a Gateway. // -// +kubebuilder:validation:XValidation:message="Hostname value must only contain valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\"): true" -type GatewayAddress struct { +// +kubebuilder:validation:XValidation:message="Hostname value must be empty or contain only valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? (!has(self.value) || self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\")): true" +type GatewaySpecAddress struct { // Type of the address. // // +optional // +kubebuilder:default=IPAddress Type *AddressType `json:"type,omitempty"` - // Value of the address. The validity of the values will depend - // on the type and support by the controller. + // When a value is unspecified, an implementation SHOULD automatically + // assign an address matching the requested type if possible. + // + // If an implementation does not support an empty value, it MUST set the + // "Programmed" condition in status to False with a reason of "AddressNotAssigned". // // Examples: `1.2.3.4`, `128::1`, `my-ip-address`. // - // +kubebuilder:validation:MinLength=1 + // +optional // +kubebuilder:validation:MaxLength=253 - Value string `json:"value"` + Value string `json:"value,omitempty"` } // GatewayStatusAddress describes a network address that is bound to a Gateway. @@ -621,6 +922,7 @@ type GatewayStatusAddress struct { // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 + // +required Value string `json:"value"` } @@ -637,6 +939,7 @@ type GatewayStatus struct { // * a specified address was unusable (e.g. already in use) // // +optional + // +listType=atomic // // +kubebuilder:validation:MaxItems=16 Addresses []GatewayStatusAddress `json:"addresses,omitempty"` @@ -654,6 +957,34 @@ type GatewayStatus struct { // * "Programmed" // * "Ready" // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition.
+ // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // // +optional // +listType=map // +listMapKey=type @@ -679,11 +1010,16 @@ type GatewayInfrastructure struct { // // An implementation may chose to add additional implementation-specific labels as they see fit. // + // If an implementation maps these labels to Pods, or any other resource that would need to be recreated when labels + // change, it SHOULD clearly warn about this behavior in documentation. + // // Support: Extended // // +optional // +kubebuilder:validation:MaxProperties=8 - Labels map[AnnotationKey]AnnotationValue `json:"labels,omitempty"` + // +kubebuilder:validation:XValidation:message="Label keys must be in the form of an optional DNS subdomain prefix followed by a required name segment of up to 63 characters.",rule="self.all(key, key.matches(r\"\"\"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$\"\"\"))" + // +kubebuilder:validation:XValidation:message="If specified, the label key's prefix must be a DNS subdomain not longer than 253 characters in total.",rule="self.all(key, key.split(\"/\")[0].size() < 253)" + Labels map[LabelKey]LabelValue `json:"labels,omitempty"` // Annotations that SHOULD be applied to any resources created in response to this Gateway. // @@ -696,6 +1032,8 @@ type GatewayInfrastructure struct { // // +optional // +kubebuilder:validation:MaxProperties=8 + // +kubebuilder:validation:XValidation:message="Annotation keys must be in the form of an optional DNS subdomain prefix followed by a required name segment of up to 63 characters.",rule="self.all(key, key.matches(r\"\"\"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$\"\"\"))" + // +kubebuilder:validation:XValidation:message="If specified, the annotation key's prefix must be a DNS subdomain not longer than 253 characters in total.",rule="self.all(key, key.split(\"/\")[0].size() < 253)" Annotations map[AnnotationKey]AnnotationValue `json:"annotations,omitempty"` // ParametersRef is a reference to a resource that contains the configuration @@ -708,6 +1046,11 @@ type GatewayInfrastructure struct { // the merging behavior is implementation specific. // It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway. // + // If the referent cannot be found, refers to an unsupported kind, or when + // the data within that resource is malformed, the Gateway SHOULD be + // rejected with the "Accepted" status condition set to "False" and an + // "InvalidParameters" reason. + // // Support: Implementation-specific // // +optional @@ -718,15 +1061,18 @@ type GatewayInfrastructure struct { // configuration resource within the namespace. type LocalParametersReference struct { // Group is the group of the referent. 
+ // +required Group Group `json:"group"` // Kind is kind of the referent. + // +required Kind Kind `json:"kind"` // Name is the name of the referent. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 + // +required Name string `json:"name"` } @@ -814,6 +1160,13 @@ const ( // information on which address is causing the problem and how to resolve it // in the condition message. GatewayReasonAddressNotUsable GatewayConditionReason = "AddressNotUsable" + // This condition indicates `FrontendValidationModeType` changed from + // `AllowValidOnly` to `AllowInsecureFallback`. + GatewayConditionInsecureFrontendValidationMode GatewayConditionReason = "InsecureFrontendValidationMode" + // This reason MUST be set for GatewayConditionInsecureFrontendValidationMode + // when a client changes FrontendValidationModeType for a Gateway or a per-port override + // to `AllowInsecureFallback`. + GatewayReasonConfigurationChanged GatewayConditionReason = "ConfigurationChanged" ) const ( @@ -907,9 +1260,41 @@ const ( GatewayReasonListenersNotReady GatewayConditionReason = "ListenersNotReady" ) +const ( + // AttachedListenerSets is a condition that is true when the Gateway has + // at least one ListenerSet attached to it. + // + // Possible reasons for this condition to be True are: + // + // * "ListenerSetsAttached" + // + // Possible reasons for this condition to be False are: + // + // * "NoListenerSetsAttached" + // * "ListenerSetsNotAllowed" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + GatewayConditionAttachedListenerSets GatewayConditionType = "AttachedListenerSets" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has at least one ListenerSet attached to it. + GatewayReasonListenerSetsAttached GatewayConditionReason = "ListenerSetsAttached" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has no ListenerSets attached to it. + GatewayReasonNoListenerSetsAttached GatewayConditionReason = "NoListenerSetsAttached" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has ListenerSets attached to it, but the ListenerSets are not allowed. + GatewayReasonListenerSetsNotAllowed GatewayConditionReason = "ListenerSetsNotAllowed" +) + // ListenerStatus is the status associated with a Listener. type ListenerStatus struct { // Name is the name of the Listener that this status corresponds to. + // +required Name SectionName `json:"name"` // SupportedKinds is the list indicating the Kinds supported by this @@ -922,6 +1307,8 @@ type ListenerStatus struct { // and invalid Route kinds are specified, the implementation MUST // reference the valid Route kinds that have been specified. // + // +required + // +listType=atomic // +kubebuilder:validation:MaxItems=8 SupportedKinds []RouteGroupKind `json:"supportedKinds"` @@ -942,13 +1329,45 @@ type ListenerStatus struct { // // Uses for this field include troubleshooting Route attachment and // measuring blast radius/impact of changes to a Listener. + // +required AttachedRoutes int32 `json:"attachedRoutes"` // Conditions describe the current condition of this listener. // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_.
+ // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // + // // +listType=map // +listMapKey=type // +kubebuilder:validation:MaxItems=8 + // +required Conditions []metav1.Condition `json:"conditions"` } @@ -1145,6 +1564,62 @@ const ( ListenerReasonPending ListenerConditionReason = "Pending" ) +const ( + // This condition indicates that TLS configuration within this Listener + // conflicts with TLS configuration in another Listener on the same port. + // This could happen for two reasons: + // + // 1) Overlapping Hostnames: Listener A matches *.example.com while Listener + // B matches foo.example.com. + // 2) Overlapping Certificates: Listener A contains a certificate with a + // SAN for *.example.com, while Listener B contains a certificate with a + // SAN for foo.example.com. + // + // This overlapping TLS configuration can be particularly problematic when + // combined with HTTP connection coalescing. When clients reuse connections + // using this technique, it can have confusing interactions with Gateway + // API, such as TLS configuration for one Listener getting used for a + // request reusing an existing connection that would not be used if the same + // request was initiating a new connection. + // + // Controllers MUST detect the presence of overlapping hostnames and MAY + // detect the presence of overlapping certificates. + // + // This condition MUST be set on all Listeners with overlapping TLS config. + // For example, consider the following listener-to-hostname mapping: + // + // A: foo.example.com + // B: foo.example.org + // C: *.example.com + // + // In the above example, Listeners A and C would have overlapping hostnames + // and therefore this condition should be set for Listeners A and C, but not + // B. + // + // Possible reasons for this condition to be True are: + // + // * "OverlappingHostnames" + // * "OverlappingCertificates" + // + // If a controller supports checking for both possible reasons and finds + // that both are true, it SHOULD set the "OverlappingCertificates" Reason. + // + // This is a negative polarity condition and MUST NOT be set when it is + // False.
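Purely as illustration (not part of the API), the hostname-overlap rule described above can be approximated as in the sketch below; wildcard semantics are simplified to "one or more leading DNS labels":

package example

import "strings"

// hostnamesOverlap reports whether two Listener hostnames can match the same
// request authority: "*.example.com" overlaps "foo.example.com", while
// "foo.example.org" overlaps neither of them.
func hostnamesOverlap(a, b string) bool {
	if a == b {
		return true
	}
	if strings.HasPrefix(a, "*.") && strings.HasSuffix(b, a[1:]) {
		return true // a[1:] is ".example.com" for "*.example.com"
	}
	return strings.HasPrefix(b, "*.") && strings.HasSuffix(a, b[1:])
}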
+ // + // Controllers may raise this condition with other reasons, but should + // prefer to use the reasons listed above to improve interoperability. + ListenerConditionOverlappingTLSConfig ListenerConditionType = "OverlappingTLSConfig" + + // This reason is used with the "OverlappingTLSConfig" condition when the + // condition is true. + ListenerReasonOverlappingHostnames ListenerConditionReason = "OverlappingHostnames" + + // This reason is used with the "OverlappingTLSConfig" condition when the + // condition is true. + ListenerReasonOverlappingCertificates ListenerConditionReason = "OverlappingCertificates" +) + const ( // "Ready" is a condition type reserved for future use. It should not be used by implementations. // Note: This condition is not really "deprecated", but rather "reserved"; however, deprecated triggers Go linters diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go index 21875dce1..972d35045 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go @@ -49,10 +49,12 @@ import ( // // GatewayClass is a Cluster level resource. type GatewayClass struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired state of GatewayClass. + // +required Spec GatewayClassSpec `json:"spec"` // Status defines the current state of GatewayClass. @@ -60,7 +62,8 @@ type GatewayClass struct { // Implementations MUST populate status on all GatewayClass resources which // specify their controller name. // - // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Waiting", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Pending", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + // +optional Status GatewayClassStatus `json:"status,omitempty"` } @@ -83,6 +86,7 @@ type GatewayClassSpec struct { // Support: Core // // +kubebuilder:validation:XValidation:message="Value is immutable",rule="self == oldSelf" + // +required ControllerName GatewayController `json:"controllerName"` // ParametersRef is a reference to a resource that contains the configuration @@ -93,8 +97,10 @@ type GatewayClassSpec struct { // or an implementation-specific custom resource. The resource can be // cluster-scoped or namespace-scoped. // - // If the referent cannot be found, the GatewayClass's "InvalidParameters" - // status condition will be true. + // If the referent cannot be found, refers to an unsupported kind, or when + // the data within that resource is malformed, the GatewayClass SHOULD be + // rejected with the "Accepted" status condition set to "False" and an + // "InvalidParameters" reason. // // A Gateway for this GatewayClass may provide its own `parametersRef`. When both are specified, // the merging behavior is implementation specific. @@ -116,15 +122,18 @@ type GatewayClassSpec struct { // configuration resource within the cluster. type ParametersReference struct { // Group is the group of the referent. + // +required Group Group `json:"group"` // Kind is kind of the referent. + // +required Kind Kind `json:"kind"` // Name is the name of the referent. 
// // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 + // +required Name string `json:"name"` // Namespace is the namespace of the referent. @@ -162,6 +171,7 @@ const ( // Possible reasons for this condition to be False are: // // * "InvalidParameters" + // * "Unsupported" // * "UnsupportedVersion" // // Possible reasons for this condition to be Unknown are: @@ -176,9 +186,13 @@ const ( // true. GatewayClassReasonAccepted GatewayClassConditionReason = "Accepted" - // This reason is used with the "Accepted" condition when the - // GatewayClass was not accepted because the parametersRef field - // was invalid, with more detail in the message. + // This reason is used with the "Accepted" condition when the GatewayClass + // was not accepted because the parametersRef field refers to + // * a namespaced resource but the Namespace field is not set, or + // * a cluster-scoped resource but the Namespace field is set, or + // * a nonexistent object, or + // * an unsupported resource or kind, or + // * an existing resource but the data within that resource is malformed. GatewayClassReasonInvalidParameters GatewayClassConditionReason = "InvalidParameters" // This reason is used with the "Accepted" condition when the @@ -187,6 +201,11 @@ const ( // GatewayClass. GatewayClassReasonPending GatewayClassConditionReason = "Pending" + // This reason is used with the "Accepted" condition when the GatewayClass + // was not accepted because the implementation does not support a + // user-defined GatewayClass. + GatewayClassReasonUnsupported GatewayClassConditionReason = "Unsupported" + // Deprecated: Use "Pending" instead. GatewayClassReasonWaiting GatewayClassConditionReason = "Waiting" ) @@ -244,6 +263,35 @@ type GatewayClassStatus struct { // Controllers should prefer to publish conditions using values // of GatewayClassConditionType for the type of each Condition. // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) 
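The read-modify-write rules above map naturally onto apimachinery's condition helpers; a sketch of a conforming status update for GatewayClass, with controller plumbing and conflict retries elided:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

// markAccepted merges an Accepted condition by Type rather than appending a
// duplicate, and stamps observedGeneration from the freshly fetched object.
// The caller is expected to have just read gc from the apiserver and to
// retry on update conflicts so the change lands on the most recent version.
func markAccepted(gc *gatewayv1.GatewayClass) {
	meta.SetStatusCondition(&gc.Status.Conditions, metav1.Condition{
		Type:               string(gatewayv1.GatewayClassConditionStatusAccepted),
		Status:             metav1.ConditionTrue,
		Reason:             string(gatewayv1.GatewayClassReasonAccepted),
		Message:            "Handled by this controller",
		ObservedGeneration: gc.Generation,
	})
}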
+ // + // + // // +optional // +listType=map // +listMapKey=type @@ -252,10 +300,10 @@ type GatewayClassStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` // SupportedFeatures is the set of features the GatewayClass support. - // It MUST be sorted in ascending alphabetical order. + // It MUST be sorted in ascending alphabetical order by the Name key. // +optional - // +listType=set - // + // +listType=map + // +listMapKey=name // +kubebuilder:validation:MaxItems=64 SupportedFeatures []SupportedFeature `json:"supportedFeatures,omitempty"` } @@ -269,6 +317,11 @@ type GatewayClassList struct { Items []GatewayClass `json:"items"` } -// SupportedFeature is used to describe distinct features that are covered by +// FeatureName is used to describe distinct features that are covered by // conformance tests. -type SupportedFeature string +type FeatureName string + +type SupportedFeature struct { + // +required + Name FeatureName `json:"name"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go new file mode 100644 index 000000000..8d768fdea --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go @@ -0,0 +1,60 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "errors" +) + +// Below code handles the experimental field breaking change introduced in +// https://github.com/kubernetes-sigs/gateway-api/pull/3200/. +// We are overriding the UnmarshalJSON function to be able to handle cases where +// users had the old version of the GatewayClass CRD applied with SupportedFeatures +// as a list of strings and not list of objects. +// See https://github.com/kubernetes-sigs/gateway-api/issues/3464 +// for more information. + +func (s *SupportedFeature) UnmarshalJSON(data []byte) error { + var oldSupportedFeature oldSupportedFeature + var unmarshalTypeErr *json.UnmarshalTypeError + if err := json.Unmarshal(data, &oldSupportedFeature); err == nil { + s.Name = FeatureName(oldSupportedFeature) + return nil + } else if !errors.As(err, &unmarshalTypeErr) { + // If the error is not a type error, return it + return err + } + + var si supportedFeatureInternal + if err := json.Unmarshal(data, &si); err != nil { + return err + } + s.Name = si.Name + return nil +} + +// This is solely for the purpose of ensuring backward compatibility and +// SHOULD NOT be used elsewhere. +type supportedFeatureInternal struct { + // +required + Name FeatureName `json:"name"` +} + +// This is solely for the purpose of ensuring backward compatibility and +// SHOULD NOT be used elsewhere. 
+type oldSupportedFeature string diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go index 91a8a3d26..5f9bde7a8 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go @@ -56,13 +56,16 @@ import ( // Implementations MAY also accept HTTP/2 connections with an upgrade from // HTTP/1, i.e. without prior knowledge. type GRPCRoute struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired state of GRPCRoute. + // +required Spec GRPCRouteSpec `json:"spec,omitempty"` // Status defines the current state of GRPCRoute. + // +optional Status GRPCRouteStatus `json:"status,omitempty"` } @@ -136,13 +139,17 @@ type GRPCRouteSpec struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 Hostnames []Hostname `json:"hostnames,omitempty"` // Rules are a list of GRPC matchers, filters and actions. // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="While 16 rules and 64 matches per rule are allowed, the total number of matches across all rules in a route must be less than 128",rule="(self.size() > 0 ? (has(self[0].matches) ? self[0].matches.size() : 0) : 0) + (self.size() > 1 ? (has(self[1].matches) ? self[1].matches.size() : 0) : 0) + (self.size() > 2 ? (has(self[2].matches) ? self[2].matches.size() : 0) : 0) + (self.size() > 3 ? (has(self[3].matches) ? self[3].matches.size() : 0) : 0) + (self.size() > 4 ? (has(self[4].matches) ? self[4].matches.size() : 0) : 0) + (self.size() > 5 ? (has(self[5].matches) ? self[5].matches.size() : 0) : 0) + (self.size() > 6 ? (has(self[6].matches) ? self[6].matches.size() : 0) : 0) + (self.size() > 7 ? (has(self[7].matches) ? self[7].matches.size() : 0) : 0) + (self.size() > 8 ? (has(self[8].matches) ? self[8].matches.size() : 0) : 0) + (self.size() > 9 ? (has(self[9].matches) ? self[9].matches.size() : 0) : 0) + (self.size() > 10 ? (has(self[10].matches) ? self[10].matches.size() : 0) : 0) + (self.size() > 11 ? (has(self[11].matches) ? self[11].matches.size() : 0) : 0) + (self.size() > 12 ? (has(self[12].matches) ? self[12].matches.size() : 0) : 0) + (self.size() > 13 ? (has(self[13].matches) ? self[13].matches.size() : 0) : 0) + (self.size() > 14 ? (has(self[14].matches) ? self[14].matches.size() : 0) : 0) + (self.size() > 15 ? (has(self[15].matches) ? self[15].matches.size() : 0) : 0) <= 128" + // Rules []GRPCRouteRule `json:"rules,omitempty"` } @@ -150,6 +157,12 @@ type GRPCRouteSpec struct { // conditions (matches), processing it (filters), and forwarding the request to // an API object (backendRefs). type GRPCRouteRule struct { + // Name is the name of the route rule. This name MUST be unique within a Route if it is set. + // + // Support: Extended + // +optional + Name *SectionName `json:"name,omitempty"` + // Matches define conditions used for matching the rule against incoming // gRPC requests. Each match is independent, i.e. this rule will be matched // if **any** one of the matches is satisfied. @@ -201,7 +214,8 @@ type GRPCRouteRule struct { // the above criteria. 
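For scale, a single hypothetical GRPCRouteMatch such as the one sketched here counts once against the per-rule match limit and the 128-matches-per-route CEL budget enforced around it; the service, method, and header values are invented:

package example

import gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

// loginMatch matches the gRPC method /com.example.User/Login when the
// x-tenant header equals "acme".
func loginMatch() gatewayv1.GRPCRouteMatch {
	service, method := "com.example.User", "Login"
	return gatewayv1.GRPCRouteMatch{
		Method:  &gatewayv1.GRPCMethodMatch{Service: &service, Method: &method},
		Headers: []gatewayv1.GRPCHeaderMatch{{Name: "x-tenant", Value: "acme"}},
	}
}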
// // +optional - // +kubebuilder:validation:MaxItems=8 + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 Matches []GRPCRouteMatch `json:"matches,omitempty"` // Filters define the filters that are applied to requests that match @@ -221,7 +235,7 @@ type GRPCRouteRule struct { // Specifying the same filter multiple times is not supported unless explicitly // indicated in the filter. // - // If an implementation can not support a combination of filters, it must clearly + // If an implementation cannot support a combination of filters, it must clearly // document that limitation. In cases where incompatible or unsupported // filters are specified and cause the `Accepted` condition to be set to status // `False`, implementations may use the `IncompatibleFilters` reason to specify @@ -230,6 +244,7 @@ type GRPCRouteRule struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" @@ -265,6 +280,7 @@ type GRPCRouteRule struct { // Support for weight: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 BackendRefs []GRPCBackendRef `json:"backendRefs,omitempty"` @@ -387,7 +403,7 @@ type GRPCHeaderMatch struct { // // +optional // +kubebuilder:default=Exact - Type *HeaderMatchType `json:"type,omitempty"` + Type *GRPCHeaderMatchType `json:"type,omitempty"` // Name is the name of the gRPC Header to be matched. // @@ -396,12 +412,14 @@ type GRPCHeaderMatch struct { // entries with an equivalent header name MUST be ignored. Due to the // case-insensitivity of header names, "foo" and "Foo" are considered // equivalent. + // +required Name GRPCHeaderName `json:"name"` // Value is the value of the gRPC Header to be matched. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=4096 + // +required Value string `json:"value"` } @@ -512,6 +530,7 @@ type GRPCRouteFilter struct { // +unionDiscriminator // +kubebuilder:validation:Enum=ResponseHeaderModifier;RequestHeaderModifier;RequestMirror;ExtensionRef // + // +required Type GRPCRouteFilterType `json:"type"` // RequestHeaderModifier defines a schema for a filter that modifies request @@ -541,6 +560,8 @@ type GRPCRouteFilter struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:XValidation:message="Only one of percent or fraction may be specified in HTTPRequestMirrorFilter",rule="!(has(self.percent) && has(self.fraction))" RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"` // ExtensionRef is an optional, implementation-specific extension to the @@ -620,6 +641,7 @@ type GRPCBackendRef struct { // Filters field in GRPCRouteRule.) 
// // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go index 736e80982..3d89af0da 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go @@ -33,13 +33,16 @@ import ( // used to specify additional processing steps. Backends specify where matching // requests should be routed. type HTTPRoute struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired state of HTTPRoute. + // +required Spec HTTPRouteSpec `json:"spec"` // Status defines the current state of HTTPRoute. + // +optional Status HTTPRouteStatus `json:"status,omitempty"` } @@ -111,14 +114,18 @@ type HTTPRouteSpec struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 Hostnames []Hostname `json:"hostnames,omitempty"` // Rules are a list of HTTP matchers, filters and actions. // // +optional + // +listType=atomic + // // +kubebuilder:validation:MaxItems=16 // +kubebuilder:default={{matches: {{path: {type: "PathPrefix", value: "/"}}}}} + // +kubebuilder:validation:XValidation:message="While 16 rules and 64 matches per rule are allowed, the total number of matches across all rules in a route must be less than 128",rule="(self.size() > 0 ? self[0].matches.size() : 0) + (self.size() > 1 ? self[1].matches.size() : 0) + (self.size() > 2 ? self[2].matches.size() : 0) + (self.size() > 3 ? self[3].matches.size() : 0) + (self.size() > 4 ? self[4].matches.size() : 0) + (self.size() > 5 ? self[5].matches.size() : 0) + (self.size() > 6 ? self[6].matches.size() : 0) + (self.size() > 7 ? self[7].matches.size() : 0) + (self.size() > 8 ? self[8].matches.size() : 0) + (self.size() > 9 ? self[9].matches.size() : 0) + (self.size() > 10 ? self[10].matches.size() : 0) + (self.size() > 11 ? self[11].matches.size() : 0) + (self.size() > 12 ? self[12].matches.size() : 0) + (self.size() > 13 ? self[13].matches.size() : 0) + (self.size() > 14 ? self[14].matches.size() : 0) + (self.size() > 15 ? self[15].matches.size() : 0) <= 128" Rules []HTTPRouteRule `json:"rules,omitempty"` } @@ -132,6 +139,12 @@ type HTTPRouteSpec struct { // +kubebuilder:validation:XValidation:message="Within backendRefs, when using RequestRedirect filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.requestRedirect) && has(f.requestRedirect.path) && f.requestRedirect.path.type == 'ReplacePrefixMatch' && has(f.requestRedirect.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? 
false : true) : true" // +kubebuilder:validation:XValidation:message="Within backendRefs, When using URLRewrite filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.urlRewrite) && has(f.urlRewrite.path) && f.urlRewrite.path.type == 'ReplacePrefixMatch' && has(f.urlRewrite.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" type HTTPRouteRule struct { + // Name is the name of the route rule. This name MUST be unique within a Route if it is set. + // + // Support: Extended + // +optional + Name *SectionName `json:"name,omitempty"` + // Matches define conditions used for matching the rule against incoming // HTTP requests. Each match is independent, i.e. this rule will be matched // if **any** one of the matches is satisfied. @@ -190,7 +203,8 @@ type HTTPRouteRule struct { // parent a request is coming from, a HTTP 404 status code MUST be returned. // // +optional - // +kubebuilder:validation:MaxItems=8 + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 // +kubebuilder:default={{path:{ type: "PathPrefix", value: "/"}}} Matches []HTTPRouteMatch `json:"matches,omitempty"` @@ -201,7 +215,7 @@ type HTTPRouteRule struct { // they are specified. // // Implementations MAY choose to implement this ordering strictly, rejecting - // any combination or order of filters that can not be supported. If implementations + // any combination or order of filters that cannot be supported. If implementations // choose a strict interpretation of filter ordering, they MUST clearly document // that behavior. // @@ -223,7 +237,7 @@ type HTTPRouteRule struct { // // All filters are expected to be compatible with each other except for the // URLRewrite and RequestRedirect filters, which may not be combined. If an - // implementation can not support other combinations of filters, they must clearly + // implementation cannot support other combinations of filters, they must clearly // document that limitation. In cases where incompatible or unsupported // filters are specified and cause the `Accepted` condition to be set to status // `False`, implementations may use the `IncompatibleFilters` reason to specify @@ -232,6 +246,7 @@ type HTTPRouteRule struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" @@ -263,6 +278,11 @@ type HTTPRouteRule struct { // invalid, 50 percent of traffic must receive a 500. Implementations may // choose how that 50 percent is determined. // + // When a HTTPBackendRef refers to a Service that has no ready endpoints, + // implementations SHOULD return a 503 for requests to that backend instead. + // If an implementation chooses to do this, all of the above rules for 500 responses + // MUST also apply for responses that return a 503. 
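A sketch of the weighting arithmetic described above, using two hypothetical Services weighted 1 and 1: if "legacy" becomes invalid, its 50 percent share must surface as 500s (or 503s when the Service has no ready endpoints) rather than spilling over to "app":

package example

import gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

// evenSplit returns two backendRefs with equal weight, i.e. a 50/50 split.
func evenSplit() []gatewayv1.HTTPBackendRef {
	ref := func(name string) gatewayv1.HTTPBackendRef {
		weight := int32(1)
		port := gatewayv1.PortNumber(80)
		return gatewayv1.HTTPBackendRef{
			BackendRef: gatewayv1.BackendRef{
				BackendObjectReference: gatewayv1.BackendObjectReference{
					Name: gatewayv1.ObjectName(name), // Kind defaults to Service
					Port: &port,
				},
				Weight: &weight,
			},
		}
	}
	return []gatewayv1.HTTPBackendRef{ref("app"), ref("legacy")}
}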
+ // // Support: Core for Kubernetes Service // // Support: Extended for Kubernetes ServiceImport @@ -272,6 +292,7 @@ type HTTPRouteRule struct { // Support for weight: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 BackendRefs []HTTPBackendRef `json:"backendRefs,omitempty"` @@ -280,9 +301,16 @@ type HTTPRouteRule struct { // Support: Extended // // +optional - // Timeouts *HTTPRouteTimeouts `json:"timeouts,omitempty"` + // Retry defines the configuration for when to retry an HTTP request. + // + // Support: Extended + // + // +optional + // + Retry *HTTPRouteRetry `json:"retry,omitempty"` + // SessionPersistence defines and configures session persistence // for the route rule. // @@ -316,7 +344,8 @@ type HTTPRouteTimeouts struct { // request stream has been received instead of immediately after the transaction is // initiated by the client. // - // When this field is unspecified, request timeout behavior is implementation-specific. + // The value of Request is a Gateway API Duration string as defined by GEP-2257. When this + // field is unspecified, request timeout behavior is implementation-specific. // // Support: Extended // @@ -336,8 +365,10 @@ type HTTPRouteTimeouts struct { // may result in more than one call from the gateway to the destination backend, // for example, if automatic retries are supported. // - // Because the Request timeout encompasses the BackendRequest timeout, the value of - // BackendRequest must be <= the value of Request timeout. + // The value of BackendRequest must be a Gateway API Duration string as defined by + // GEP-2257. When this field is unspecified, its behavior is implementation-specific; + // when specified, the value of BackendRequest must be no more than the value of the + // Request timeout (since the Request timeout encompasses the BackendRequest timeout). // // Support: Extended // @@ -345,6 +376,96 @@ type HTTPRouteTimeouts struct { BackendRequest *Duration `json:"backendRequest,omitempty"` } +// HTTPRouteRetry defines retry configuration for an HTTPRoute. +// +// Implementations SHOULD retry on connection errors (disconnect, reset, timeout, +// TCP failure) if a retry stanza is configured. +type HTTPRouteRetry struct { + // Codes defines the HTTP response status codes for which a backend request + // should be retried. + // + // Support: Extended + // + // +optional + // +listType=atomic + Codes []HTTPRouteRetryStatusCode `json:"codes,omitempty"` + + // Attempts specifies the maximum number of times an individual request + // from the gateway to a backend should be retried. + // + // If the maximum number of retries has been attempted without a successful + // response from the backend, the Gateway MUST return an error. + // + // When this field is unspecified, the number of times to attempt to retry + // a backend request is implementation-specific. + // + // Support: Extended + // + // +optional + Attempts *int `json:"attempts,omitempty"` + + // Backoff specifies the minimum duration a Gateway should wait between + // retry attempts and is represented in Gateway API Duration formatting. + // + // For example, setting the `rules[].retry.backoff` field to the value + // `100ms` will cause a backend request to first be retried approximately + // 100 milliseconds after timing out or receiving a response code configured + // to be retryable. 
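Tying the retry fields above together, a hypothetical rule that retries 503s at most twice with at least 100ms between attempts, inside a 10s request budget (so retries can never outlive the Request timeout):

package example

import gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

// retryingRule sketches HTTPRouteRetry combined with HTTPRouteTimeouts; all
// values are illustrative and BackendRequest is deliberately left unset.
func retryingRule() gatewayv1.HTTPRouteRule {
	attempts := 2
	backoff := gatewayv1.Duration("100ms")
	request := gatewayv1.Duration("10s")
	return gatewayv1.HTTPRouteRule{
		Retry: &gatewayv1.HTTPRouteRetry{
			Codes:    []gatewayv1.HTTPRouteRetryStatusCode{503},
			Attempts: &attempts,
			Backoff:  &backoff,
		},
		Timeouts: &gatewayv1.HTTPRouteTimeouts{Request: &request},
	}
}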
+ // + // An implementation MAY use an exponential or alternative backoff strategy + // for subsequent retry attempts, MAY cap the maximum backoff duration to + // some amount greater than the specified minimum, and MAY add arbitrary + // jitter to stagger requests, as long as unsuccessful backend requests are + // not retried before the configured minimum duration. + // + // If a Request timeout (`rules[].timeouts.request`) is configured on the + // route, the entire duration of the initial request and any retry attempts + // MUST not exceed the Request timeout duration. If any retry attempts are + // still in progress when the Request timeout duration has been reached, + // these SHOULD be canceled if possible and the Gateway MUST immediately + // return a timeout error. + // + // If a BackendRequest timeout (`rules[].timeouts.backendRequest`) is + // configured on the route, any retry attempts which reach the configured + // BackendRequest timeout duration without a response SHOULD be canceled if + // possible and the Gateway should wait for at least the specified backoff + // duration before attempting to retry the backend request again. + // + // If a BackendRequest timeout is _not_ configured on the route, retry + // attempts MAY time out after an implementation default duration, or MAY + // remain pending until a configured Request timeout or implementation + // default duration for total request time is reached. + // + // When this field is unspecified, the time to wait between retry attempts + // is implementation-specific. + // + // Support: Extended + // + // +optional + Backoff *Duration `json:"backoff,omitempty"` +} + +// HTTPRouteRetryStatusCode defines an HTTP response status code for +// which a backend request should be retried. +// +// Implementations MUST support the following status codes as retryable: +// +// * 500 +// * 502 +// * 503 +// * 504 +// +// Implementations MAY support specifying additional discrete values in the +// 500-599 range. +// +// Implementations MAY support specifying discrete values in the 400-499 range, +// which are often inadvisable to retry. +// +// +kubebuilder:validation:Minimum:=400 +// +kubebuilder:validation:Maximum:=599 +// +type HTTPRouteRetryStatusCode int + // PathMatchType specifies the semantics of how HTTP paths should be compared. // Valid PathMatchType values, along with their support levels, are: // @@ -374,7 +495,7 @@ const ( PathMatchExact PathMatchType = "Exact" // Matches based on a URL path prefix split by `/`. Matching is - // case sensitive and done on a path element by element basis. A + // case-sensitive and done on a path element by element basis. A // path element refers to the list of labels in the path split by // the `/` separator. When specified, a trailing `/` is ignored. // @@ -483,7 +604,7 @@ type HTTPHeaderMatch struct { Type *HeaderMatchType `json:"type,omitempty"` // Name is the name of the HTTP Header to be matched. Name matching MUST be - // case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // case-insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). // // If multiple entries specify equivalent header names, only the first // entry with an equivalent name MUST be considered for a match. Subsequent @@ -496,12 +617,14 @@ type HTTPHeaderMatch struct { // Generally, proxies should follow the guidance from the RFC: // https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 regarding // processing a repeated header, with special handling for "Set-Cookie". 
+ // +required Name HTTPHeaderName `json:"name"` // Value is the value of HTTP Header to be matched. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=4096 + // +required Value string `json:"value"` } @@ -563,12 +686,14 @@ type HTTPQueryParamMatch struct { // // Users SHOULD NOT route traffic based on repeated query params to guard // themselves against potential differences in the implementations. + // +required Name HTTPHeaderName `json:"name"` // Value is the value of HTTP query param to be matched. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=1024 + // +required Value string `json:"value"` } @@ -588,6 +713,9 @@ type HTTPQueryParamMatch struct { // +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH type HTTPMethod string +// +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH;* +type HTTPMethodWithWildcard string + const ( HTTPMethodGet HTTPMethod = "GET" HTTPMethodHead HTTPMethod = "HEAD" @@ -674,6 +802,10 @@ type HTTPRouteMatch struct { // +kubebuilder:validation:XValidation:message="filter.requestRedirect must be specified for RequestRedirect filter.type",rule="!(!has(self.requestRedirect) && self.type == 'RequestRedirect')" // +kubebuilder:validation:XValidation:message="filter.urlRewrite must be nil if the filter.type is not URLRewrite",rule="!(has(self.urlRewrite) && self.type != 'URLRewrite')" // +kubebuilder:validation:XValidation:message="filter.urlRewrite must be specified for URLRewrite filter.type",rule="!(!has(self.urlRewrite) && self.type == 'URLRewrite')" +// +// +// +// // +kubebuilder:validation:XValidation:message="filter.extensionRef must be nil if the filter.type is not ExtensionRef",rule="!(has(self.extensionRef) && self.type != 'ExtensionRef')" // +kubebuilder:validation:XValidation:message="filter.extensionRef must be specified for ExtensionRef filter.type",rule="!(!has(self.extensionRef) && self.type == 'ExtensionRef')" type HTTPRouteFilter struct { @@ -712,6 +844,8 @@ type HTTPRouteFilter struct { // // +unionDiscriminator // +kubebuilder:validation:Enum=RequestHeaderModifier;ResponseHeaderModifier;RequestMirror;RequestRedirect;URLRewrite;ExtensionRef + // + // +required Type HTTPRouteFilterType `json:"type"` // RequestHeaderModifier defines a schema for a filter that modifies request @@ -741,6 +875,8 @@ type HTTPRouteFilter struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:XValidation:message="Only one of percent or fraction may be specified in HTTPRequestMirrorFilter",rule="!(has(self.percent) && has(self.fraction))" RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"` // RequestRedirect defines a schema for a filter that responds to the @@ -758,6 +894,28 @@ type HTTPRouteFilter struct { // +optional URLRewrite *HTTPURLRewriteFilter `json:"urlRewrite,omitempty"` + // CORS defines a schema for a filter that responds to the + // cross-origin request based on HTTP response header. + // + // Support: Extended + // + // +optional + // + CORS *HTTPCORSFilter `json:"cors,omitempty"` + + // ExternalAuth configures settings related to sending request details + // to an external auth service. The external service MUST authenticate + // the request, and MAY authorize the request as well. + // + // If there is any problem communicating with the external service, + // this filter MUST fail closed. 
+ // + // Support: Extended + // + // +optional + // + ExternalAuth *HTTPExternalAuthFilter `json:"externalAuth,omitempty"` + // ExtensionRef is an optional, implementation-specific extension to the // "filter" behavior. For example, resource "myroutefilter" in group // "networking.example.net"). ExtensionRef MUST NOT be used for core and @@ -820,6 +978,27 @@ const ( // Support in HTTPBackendRef: Extended HTTPRouteFilterRequestMirror HTTPRouteFilterType = "RequestMirror" + // HTTPRouteFilterCORS can be used to add CORS headers to an + // HTTP response before it is sent to the client. + // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + // + HTTPRouteFilterCORS HTTPRouteFilterType = "CORS" + + // HTTPRouteFilterExternalAuth can be used to configure a Gateway implementation + // to call out to an external Auth server, which MUST perform Authentication + // and MAY perform Authorization on the matched request before the request + // is forwarded to the backend. + // + // Support in HTTPRouteRule: Extended + // + // Feature Name: HTTPRouteExternalAuth + // + // + HTTPRouteFilterExternalAuth HTTPRouteFilterType = "ExternalAuth" + // HTTPRouteFilterExtensionRef should be used for configuring custom // HTTP filters. // @@ -832,28 +1011,30 @@ const ( // HTTPHeader represents an HTTP Header name and value as defined by RFC 7230. type HTTPHeader struct { // Name is the name of the HTTP Header to be matched. Name matching MUST be - // case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // case-insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). // // If multiple entries specify equivalent header names, the first entry with // an equivalent name MUST be considered for a match. Subsequent entries // with an equivalent header name MUST be ignored. Due to the // case-insensitivity of header names, "foo" and "Foo" are considered // equivalent. + // +required Name HTTPHeaderName `json:"name"` // Value is the value of HTTP Header to be matched. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=4096 + // +required Value string `json:"value"` } // HTTPHeaderFilter defines a filter that modifies the headers of an HTTP -// request or response. Only one action for a given header name is permitted. -// Filters specifying multiple actions of the same or different type for any one -// header name are invalid and will be rejected by CRD validation. -// Configuration to set or add multiple values for a header must use RFC 7230 -// header value formatting, separating each value with a comma. +// request or response. Only one action for a given header name is +// permitted. Filters specifying multiple actions of the same or different +// type for any one header name are invalid. Configuration to set or add +// multiple values for a header must use RFC 7230 header value formatting, +// separating each value with a comma. type HTTPHeaderFilter struct { // Set overwrites the request with the given header (name, value) // before the action. @@ -963,6 +1144,7 @@ type HTTPPathModifier struct { // Reason of `UnsupportedValue`. 
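The filter struct above is a tagged union: the CEL rules quoted earlier pair `type` with exactly one populated stanza. A minimal sketch of a well-formed filter, assuming the same `gatewayv1` import alias as the earlier example (the header name and value are illustrative):

```go
package main

import (
	"fmt"

	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

func main() {
	// Type names the one stanza that is set; populating a different
	// stanza, or none at all, would be rejected by the CEL validations.
	filter := gatewayv1.HTTPRouteFilter{
		Type: gatewayv1.HTTPRouteFilterRequestHeaderModifier,
		RequestHeaderModifier: &gatewayv1.HTTPHeaderFilter{
			Add: []gatewayv1.HTTPHeader{{Name: "X-Request-Source", Value: "gateway"}},
		},
	}
	fmt.Println("filter type:", filter.Type)
}
```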
// // +kubebuilder:validation:Enum=ReplaceFullPath;ReplacePrefixMatch + // +required Type HTTPPathModifierType `json:"type"` // ReplaceFullPath specifies the value with which to replace the full path @@ -1070,6 +1252,9 @@ type HTTPRequestRedirectFilter struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 Port *PortNumber `json:"port,omitempty"` // StatusCode is the HTTP status code to be used in response. @@ -1136,7 +1321,438 @@ type HTTPRequestMirrorFilter struct { // Support: Extended for Kubernetes Service // // Support: Implementation-specific for any other resource + // +required BackendRef BackendObjectReference `json:"backendRef"` + + // Percent represents the percentage of requests that should be + // mirrored to BackendRef. Its minimum value is 0 (indicating 0% of + // requests) and its maximum value is 100 (indicating 100% of requests). + // + // Only one of Fraction or Percent may be specified. If neither field + // is specified, 100% of requests will be mirrored. + // + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=100 + Percent *int32 `json:"percent,omitempty"` + + // Fraction represents the fraction of requests that should be + // mirrored to BackendRef. + // + // Only one of Fraction or Percent may be specified. If neither field + // is specified, 100% of requests will be mirrored. + // + // +optional + Fraction *Fraction `json:"fraction,omitempty"` +} + +// HTTPCORSFilter defines a filter that configures Cross-Origin Resource +// Sharing (CORS). +type HTTPCORSFilter struct { + // AllowOrigins indicates whether the response can be shared with the + // requesting resource from the given `Origin`. + // + // The `Origin` consists of a scheme and a host, with an optional port, and + // takes the form `<scheme>://<host>(:<port>)`. + // + // Valid values for scheme are: `http` and `https`. + // + // Valid values for port are any integer between 1 and 65535 (the list of + // available TCP/UDP ports). Note that, if not included, port `80` is + // assumed for `http` scheme origins, and port `443` is assumed for `https` + // origins. This may affect origin matching. + // + // The host part of the origin may contain the wildcard character `*`. These + // wildcard characters behave as follows: + // + // * `*` is a greedy match to the _left_, including any number of + // DNS labels to the left of its position. This also means that + // `*` will include any number of period `.` characters to the + // left of its position. + // * A wildcard by itself matches all hosts. + // + // An origin value that includes _only_ the `*` character indicates that + // requests from all `Origin`s are allowed. + // + // When the `AllowOrigins` field is configured with multiple origins, it + // means the server supports clients from multiple origins. If the request + // `Origin` matches the configured allowed origins, the gateway must return + // the given `Origin`, setting the value of the header + // `Access-Control-Allow-Origin` to the same value as the `Origin` header + // provided by the client. + // + // The status code of a successful response to a "preflight" request is + // always an OK status (i.e., 204 or 200). + // + // If the request `Origin` does not match the configured allowed origins, + // the gateway returns a 204/200 response but doesn't set the relevant + // cross-origin response headers.
Alternatively, the gateway responds with + // a 403 status to deny the "preflight" request, omitting + // the CORS headers. The cross-origin request fails on the client side. + // Therefore, the client doesn't attempt the actual cross-origin request. + // + // The `Access-Control-Allow-Origin` response header can only use the `*` + // wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and the `AllowOrigins` field is + // specified with the `*` wildcard, the gateway must return a single origin + // in the value of the `Access-Control-Allow-Origin` response header, + // instead of specifying the `*` wildcard. The value of the header + // `Access-Control-Allow-Origin` is the same as the `Origin` header provided by + // the client. + // + // Support: Extended + // +listType=set + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:XValidation:message="AllowOrigins cannot contain '*' alongside other origins",rule="!('*' in self && self.size() > 1)" + // +optional + AllowOrigins []CORSOrigin `json:"allowOrigins,omitempty"` + + // AllowCredentials indicates whether the actual cross-origin request is + // allowed to include credentials. + // + // When set to true, the gateway will include the `Access-Control-Allow-Credentials` + // response header with value true (case-sensitive). + // + // When set to false or omitted, the gateway will omit the header + // `Access-Control-Allow-Credentials` entirely (this is the standard CORS + // behavior). + // + // Support: Extended + // + // +optional + AllowCredentials *bool `json:"allowCredentials,omitempty"` + + // AllowMethods indicates which HTTP methods are supported for accessing the + // requested resource. + // + // Valid values are any method defined by RFC9110, along with the special + // value `*`, which indicates that all HTTP methods are allowed. + // + // Method names are case-sensitive, so these values are also case-sensitive. + // (See https://www.rfc-editor.org/rfc/rfc2616#section-5.1.1) + // + // Multiple method names in the value of the `Access-Control-Allow-Methods` + // response header are separated by a comma (","). + // + // A CORS-safelisted method is a method that is `GET`, `HEAD`, or `POST`. + // (See https://fetch.spec.whatwg.org/#cors-safelisted-method) The + // CORS-safelisted methods are always allowed, regardless of whether they + // are specified in the `AllowMethods` field. + // + // When the `AllowMethods` field is configured with one or more methods, the + // gateway must return the `Access-Control-Allow-Methods` response header + // with its value taken from the `AllowMethods` field. + // + // If the HTTP method of the `Access-Control-Request-Method` request header + // is not included in the list of methods specified by the response header + // `Access-Control-Allow-Methods`, an error will occur on the client + // side. + // + // The `Access-Control-Allow-Methods` response header can only use the `*` + // wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and the `AllowMethods` field is + // specified with the `*` wildcard, the gateway must specify one HTTP method + // in the value of the Access-Control-Allow-Methods response header. The + // value of the header `Access-Control-Allow-Methods` is the same as the + // `Access-Control-Request-Method` header provided by the client.
If the + // header `Access-Control-Request-Method` is not included in the request, + // the gateway will omit the `Access-Control-Allow-Methods` response header, + // instead of specifying the `*` wildcard. A Gateway implementation may + // choose to add implementation-specific default methods. + // + // Support: Extended + // + // +listType=set + // +kubebuilder:validation:MaxItems=9 + // +kubebuilder:validation:XValidation:message="AllowMethods cannot contain '*' alongside other methods",rule="!('*' in self && self.size() > 1)" + // +optional + AllowMethods []HTTPMethodWithWildcard `json:"allowMethods,omitempty"` + + // AllowHeaders indicates which HTTP request headers are supported for + // accessing the requested resource. + // + // Header names are case-insensitive. + // + // Multiple header names in the value of the `Access-Control-Allow-Headers` + // response header are separated by a comma (","). + // + // When the `AllowHeaders` field is configured with one or more headers, the + // gateway must return the `Access-Control-Allow-Headers` response header + // with its value taken from the `AllowHeaders` field. + // + // If any header name in the `Access-Control-Request-Headers` request header + // is not included in the list of header names specified by the response + // header `Access-Control-Allow-Headers`, an error will occur on the + // client side. + // + // If any header name in the `Access-Control-Allow-Headers` response header + // is not recognized by the client, an error will also occur on the + // client side. + // + // A wildcard indicates that requests with all HTTP headers are allowed. + // The `Access-Control-Allow-Headers` response header can only use the `*` + // wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and the `AllowHeaders` field is + // specified with the `*` wildcard, the gateway must specify one or more + // HTTP headers in the value of the `Access-Control-Allow-Headers` response + // header. The value of the header `Access-Control-Allow-Headers` is the same as + // the `Access-Control-Request-Headers` header provided by the client. If + // the header `Access-Control-Request-Headers` is not included in the + // request, the gateway will omit the `Access-Control-Allow-Headers` + // response header, instead of specifying the `*` wildcard. A Gateway + // implementation may choose to add implementation-specific default headers. + // + // Support: Extended + // + // +listType=set + // +kubebuilder:validation:MaxItems=64 + // +optional + AllowHeaders []HTTPHeaderName `json:"allowHeaders,omitempty"` + + // ExposeHeaders indicates which HTTP response headers can be exposed + // to client-side scripts in response to a cross-origin request. + // + // A CORS-safelisted response header is an HTTP header in a CORS response + // that is considered safe to expose to client scripts. + // The CORS-safelisted response headers include the following headers: + // `Cache-Control` + // `Content-Language` + // `Content-Length` + // `Content-Type` + // `Expires` + // `Last-Modified` + // `Pragma` + // (See https://fetch.spec.whatwg.org/#cors-safelisted-response-header-name) + // The CORS-safelisted response headers are exposed to the client by default. + // + // When an HTTP header name is specified using the `ExposeHeaders` field, + // this additional header will be exposed as part of the response to the + // client. + // + // Header names are case-insensitive.
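As a worked example of the CORS fields above, here is a sketch allowing credentialed requests from wildcard subdomain origins. It assumes the same `gatewayv1` and `k8s.io/utils/ptr` imports as the earlier examples; the origins, methods, and headers are illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

func main() {
	cors := gatewayv1.HTTPCORSFilter{
		// `https://*.example.com` uses the greedy left-hand wildcard
		// described above; a bare `*` could not be combined with it,
		// per the AllowOrigins CEL validation.
		AllowOrigins:     []gatewayv1.CORSOrigin{"https://*.example.com"},
		AllowCredentials: ptr.To(true),
		// GET/HEAD/POST are CORS-safelisted and always allowed, so only
		// the extra methods need listing here.
		AllowMethods: []gatewayv1.HTTPMethodWithWildcard{"PUT", "DELETE"},
		AllowHeaders: []gatewayv1.HTTPHeaderName{"Content-Type", "Authorization"},
		MaxAge:       300, // cache preflight results for 5 minutes
	}
	fmt.Printf("CORS origins: %v\n", cors.AllowOrigins)
}
```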
+ // + // Multiple header names in the value of the `Access-Control-Expose-Headers` + // response header are separated by a comma (","). + // + // A wildcard indicates that all HTTP response headers are exposed + // to clients. The `Access-Control-Expose-Headers` response header can only + // use the `*` wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // Support: Extended + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems=64 + ExposeHeaders []HTTPHeaderName `json:"exposeHeaders,omitempty"` + + // MaxAge indicates the duration (in seconds) for the client to cache the + // results of a "preflight" request. + // + // The information provided by the `Access-Control-Allow-Methods` and + // `Access-Control-Allow-Headers` response headers can be cached by the + // client until the time specified by `Access-Control-Max-Age` elapses. + // + // The default value of the `Access-Control-Max-Age` response header is 5 + // (seconds). + // + // +optional + // +kubebuilder:default=5 + // +kubebuilder:validation:Minimum=1 + MaxAge int32 `json:"maxAge,omitempty"` +} + +// HTTPRouteExternalAuthProtocol specifies what protocol should be used +// for communicating with an external authorization server. +// +// Valid values are supplied as constants below. +type HTTPRouteExternalAuthProtocol string + +const ( + HTTPRouteExternalAuthGRPCProtocol HTTPRouteExternalAuthProtocol = "GRPC" + HTTPRouteExternalAuthHTTPProtocol HTTPRouteExternalAuthProtocol = "HTTP" +) + +// HTTPExternalAuthFilter defines a filter that modifies requests by sending +// request details to an external authorization server. +// +// Support: Extended +// Feature Name: HTTPRouteExternalAuth +// +kubebuilder:validation:XValidation:message="grpc must be specified when protocol is set to 'GRPC'",rule="self.protocol == 'GRPC' ? has(self.grpc) : true" +// +kubebuilder:validation:XValidation:message="protocol must be 'GRPC' when grpc is set",rule="has(self.grpc) ? self.protocol == 'GRPC' : true" +// +kubebuilder:validation:XValidation:message="http must be specified when protocol is set to 'HTTP'",rule="self.protocol == 'HTTP' ? has(self.http) : true" +// +kubebuilder:validation:XValidation:message="protocol must be 'HTTP' when http is set",rule="has(self.http) ? self.protocol == 'HTTP' : true" +type HTTPExternalAuthFilter struct { + // ExternalAuthProtocol describes which protocol to use when communicating with an + // ext_authz authorization server. + // + // When this is set to GRPC, each backend must use the Envoy ext_authz protocol + // on the port specified in `backendRefs`. Requests and responses are defined + // in the protobufs explained at: + // https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto + // + // When this is set to HTTP, each backend must respond with a `200` status + // code on a successful authorization. Any other code is considered + // an authorization failure. + // + // Feature Names: + // GRPC Support - HTTPRouteExternalAuthGRPC + // HTTP Support - HTTPRouteExternalAuthHTTP + // + // +unionDiscriminator + // +required + // +kubebuilder:validation:Enum=HTTP;GRPC + ExternalAuthProtocol HTTPRouteExternalAuthProtocol `json:"protocol,omitempty"` + + // BackendRef is a reference to a backend to send authorization + // requests to. + // + // The backend must speak the selected protocol (GRPC or HTTP) on the + // referenced port.
+ // + // If the backend service requires TLS, use BackendTLSPolicy to tell the + // implementation to supply the TLS details to be used to connect to that + // backend. + // + // +required + BackendRef BackendObjectReference `json:"backendRef,omitempty"` + + // GRPCAuthConfig contains configuration for communication with ext_authz + // protocol-speaking backends. + // + // If unset, implementations must assume the default behavior for each + // included field is intended. + // + // +optional + GRPCAuthConfig *GRPCAuthConfig `json:"grpc,omitempty"` + + // HTTPAuthConfig contains configuration for communication with HTTP-speaking + // backends. + // + // If unset, implementations must assume the default behavior for each + // included field is intended. + // + // +optional + HTTPAuthConfig *HTTPAuthConfig `json:"http,omitempty"` + + // ForwardBody controls whether requests to the authorization server should include + // the body of the client request and, if so, how big that body is allowed + // to be. + // + // It is expected that implementations will buffer the request body up to + // `forwardBody.maxSize` bytes. Bodies over that size must be rejected with a + // 4xx series error (413 or 403 are common examples), and fail processing + // of the filter. + // + // If unset, or `forwardBody.maxSize` is set to `0`, then the body will not + // be forwarded. + // + // Feature Name: HTTPRouteExternalAuthForwardBody + // + // + // +optional + ForwardBody *ForwardBodyConfig `json:"forwardBody,omitempty"` +} + +// GRPCAuthConfig contains configuration for communication with Auth server +// backends that speak Envoy's ext_authz gRPC protocol. +// +// Requests and responses are defined in the protobufs explained at: +// https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto +type GRPCAuthConfig struct { + // AllowedRequestHeaders specifies what headers from the client request + // will be sent to the authorization server. + // + // If this list is empty, then all headers must be sent. + // + // If the list has entries, only those entries must be sent. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems=64 + AllowedRequestHeaders []string `json:"allowedHeaders,omitempty"` +} + +// HTTPAuthConfig contains configuration for communication with HTTP-speaking +// backends. +type HTTPAuthConfig struct { + // Path sets the prefix that will be prepended to paths from the client request + // when they are forwarded to the authorization server. + // + // When empty or unspecified, no prefix is added. + // + // Valid values are the same as the "value" regex for path values in the `match` + // stanza, and the validation regex will screen out invalid paths in the same way. + // Even with the validation, implementations MUST sanitize this input before using it + // directly. + // + // +optional + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:Pattern="^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$" + Path string `json:"path,omitempty"` + + // AllowedRequestHeaders specifies what additional headers from the client request + // will be sent to the authorization server. + // + // The following headers must always be sent to the authorization server, + // regardless of this setting: + // + // * `Host` + // * `Method` + // * `Path` + // * `Content-Length` + // * `Authorization` + // + // If this list is empty, then only those headers must be sent.
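Tying the external-auth pieces above together, a minimal sketch of an HTTP-protocol filter. The Service name `authz`, port, path, and header name are illustrative assumptions; note that `protocol` must be `HTTP` because the `http` stanza is populated, per the CEL pairing rules on the filter.

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

func main() {
	auth := gatewayv1.HTTPExternalAuthFilter{
		// HTTP protocol pairs with the HTTPAuthConfig stanza below.
		ExternalAuthProtocol: gatewayv1.HTTPRouteExternalAuthHTTPProtocol,
		BackendRef: gatewayv1.BackendObjectReference{
			Name: "authz", // hypothetical auth Service
			Port: ptr.To(gatewayv1.PortNumber(8080)),
		},
		HTTPAuthConfig: &gatewayv1.HTTPAuthConfig{
			Path:                   "/verify",
			AllowedResponseHeaders: []string{"X-Auth-User"},
		},
	}
	fmt.Println("auth protocol:", auth.ExternalAuthProtocol)
}
```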
+ // + // Note that `Content-Length` has a special behavior, in that the length + // sent must be correct for the actual request to the external authorization + // server - that is, it must reflect the actual number of bytes sent in the + // body of the request to the authorization server. + // + // So if the `forwardBody` stanza is unset, or `forwardBody.maxSize` is set + // to `0`, then `Content-Length` must be `0`. If `forwardBody.maxSize` is set + // to anything other than `0`, then the `Content-Length` of the authorization + // request must be set to the actual number of bytes forwarded. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems=64 + AllowedRequestHeaders []string `json:"allowedHeaders,omitempty"` + + // AllowedResponseHeaders specifies what headers from the authorization response + // will be copied into the request to the backend. + // + // If this list is empty, then all headers from the authorization server + // except Authority or Host must be copied. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems=64 + AllowedResponseHeaders []string `json:"allowedResponseHeaders,omitempty"` +} + +// ForwardBodyConfig configures whether requests to the authorization server should include +// the body of the client request and, if so, how big that body is allowed +// to be. +// +// If empty or unset, do not forward the body. +type ForwardBodyConfig struct { + // MaxSize specifies, in bytes, the largest body that will be buffered + // and sent to the authorization server. If the body size is larger than + // `maxSize`, then the body sent to the authorization server must be + // truncated to `maxSize` bytes. + // + // Experimental note: This behavior needs to be checked against + // various dataplanes; it may need to be changed. + // See https://github.com/kubernetes-sigs/gateway-api/pull/4001#discussion_r2291405746 + // for more. + // + // If 0, the body will not be sent to the authorization server. + // +optional + MaxSize uint16 `json:"maxSize,omitempty"` } // HTTPBackendRef defines how a HTTPRoute forwards a HTTP request. @@ -1215,9 +1831,9 @@ type HTTPBackendRef struct { // Filters field in HTTPRouteRule.)
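The `maxSize` comment above describes truncation while the filter-level `forwardBody` comment describes rejection, a tension the experimental note itself flags. A sketch of the truncation reading, under that caveat; `truncateBody` is an illustrative helper, not part of the API.

```go
package main

import (
	"fmt"

	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

// truncateBody applies the MaxSize semantics as described on
// ForwardBodyConfig: a nil config or MaxSize of 0 means the body is not
// forwarded, and larger bodies are cut down to MaxSize bytes.
func truncateBody(body []byte, cfg *gatewayv1.ForwardBodyConfig) []byte {
	if cfg == nil || cfg.MaxSize == 0 {
		return nil
	}
	if len(body) > int(cfg.MaxSize) {
		return body[:cfg.MaxSize]
	}
	return body
}

func main() {
	cfg := &gatewayv1.ForwardBodyConfig{MaxSize: 4}
	fmt.Printf("%q\n", truncateBody([]byte("abcdefgh"), cfg)) // "abcd"
}
```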
// // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" - // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" // +kubebuilder:validation:XValidation:message="RequestRedirect filter cannot be repeated",rule="self.filter(f, f.type == 'RequestRedirect').size() <= 1" diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go index 421572ace..414e39b94 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go @@ -27,12 +27,15 @@ package v1 type LocalObjectReference struct { // Group is the group of the referent. For example, "gateway.networking.k8s.io". // When unspecified or empty string, core API group is inferred. + // +required Group Group `json:"group"` // Kind is kind of the referent. For example "HTTPRoute" or "Service". + // +required Kind Kind `json:"kind"` // Name is the name of the referent. + // +required Name ObjectName `json:"name"` } @@ -60,6 +63,7 @@ type SecretObjectReference struct { Kind *Kind `json:"kind"` // Name is the name of the referent. + // +required Name ObjectName `json:"name"` // Namespace is the namespace of the referenced object. When unspecified, the local @@ -121,6 +125,7 @@ type BackendObjectReference struct { Kind *Kind `json:"kind,omitempty"` // Name is the name of the referent. + // +required Name ObjectName `json:"name"` // Namespace is the namespace of the backend. When unspecified, the local @@ -143,6 +148,8 @@ type BackendObjectReference struct { // resource or this field. // // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 Port *PortNumber `json:"port,omitempty"` } @@ -156,13 +163,16 @@ type BackendObjectReference struct { // on the containing object. type ObjectReference struct { // Group is the group of the referent. For example, "gateway.networking.k8s.io". - // When unspecified or empty string, core API group is inferred. + // When set to the empty string, core API group is inferred. + // +required Group Group `json:"group"` // Kind is kind of the referent. For example "ConfigMap" or "Service". + // +required Kind Kind `json:"kind"` // Name is the name of the referent. + // +required Name ObjectName `json:"name"` // Namespace is the namespace of the referenced object. When unspecified, the local diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go new file mode 100644 index 000000000..552db9bf7 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go @@ -0,0 +1,279 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + // PolicyLabelKey is the label whose presence identifies a CRD that the + // Gateway API Policy attachment model applies to. The value of the label SHOULD be one + // of the following: + // - A label value of "Inherited" indicates that this Policy is inheritable. + // An example of inheritable policy is one which if applied at the Gateway + // level would affect all attached HTTPRoutes and their respective + // Backends. + // - A label value of "Direct" indicates that the policy only affects the + // resource to which it is attached and does not affect its sub-resources. + PolicyLabelKey = "gateway.networking.k8s.io/policy" +) + +// LocalPolicyTargetReference identifies an API object to apply a direct or +// inherited policy to. This should be used as part of Policy resources +// that can target Gateway API resources. For more information on how this +// policy attachment model works, and a sample Policy resource, refer to +// the policy attachment documentation for Gateway API. +type LocalPolicyTargetReference struct { + // Group is the group of the target resource. + // +required + Group Group `json:"group"` + + // Kind is kind of the target resource. + // +required + Kind Kind `json:"kind"` + + // Name is the name of the target resource. + // +required + Name ObjectName `json:"name"` +} + +// NamespacedPolicyTargetReference identifies an API object to apply a direct or +// inherited policy to, potentially in a different namespace. This should only +// be used as part of Policy resources that need to be able to target resources +// in different namespaces. For more information on how this policy attachment +// model works, and a sample Policy resource, refer to the policy attachment +// documentation for Gateway API. +type NamespacedPolicyTargetReference struct { + // Group is the group of the target resource. + // +required + Group Group `json:"group"` + + // Kind is kind of the target resource. + // +required + Kind Kind `json:"kind"` + + // Name is the name of the target resource. + // +required + Name ObjectName `json:"name"` + + // Namespace is the namespace of the referent. When unspecified, the local + // namespace is inferred. Even when policy targets a resource in a different + // namespace, it MUST only apply to traffic originating from the same + // namespace as the policy. + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` +} + +// LocalPolicyTargetReferenceWithSectionName identifies an API object to apply a +// direct policy to. This should be used as part of Policy resources that can +// target single resources. For more information on how this policy attachment +// model works, and a sample Policy resource, refer to the policy attachment +// documentation for Gateway API. +// +// Note: This should only be used for direct policy attachment when references +// to SectionName are actually needed.
In all other cases, +// LocalPolicyTargetReference should be used. +type LocalPolicyTargetReferenceWithSectionName struct { + LocalPolicyTargetReference `json:",inline"` + + // SectionName is the name of a section within the target resource. When + // unspecified, this targetRef targets the entire resource. In the following + // resources, SectionName is interpreted as the following: + // + // * Gateway: Listener name + // * HTTPRoute: HTTPRouteRule name + // * Service: Port name + // + // If a SectionName is specified, but does not exist on the targeted object, + // the Policy must fail to attach, and the policy implementation should record + // a `ResolvedRefs` or similar Condition in the Policy's status. + // + // +optional + SectionName *SectionName `json:"sectionName,omitempty"` +} + +// PolicyConditionType is a type of condition for a policy. This type should be +// used with a Policy resource Status.Conditions field. +type PolicyConditionType string + +// PolicyConditionReason is a reason for a policy condition. +type PolicyConditionReason string + +const ( + // PolicyConditionAccepted indicates whether the policy has been accepted or + // rejected by a targeted resource, and why. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "Conflicted" + // * "Invalid" + // * "TargetNotFound" + // + PolicyConditionAccepted PolicyConditionType = "Accepted" + + // PolicyReasonAccepted is used with the "Accepted" condition when the policy + // has been accepted by the targeted resource. + PolicyReasonAccepted PolicyConditionReason = "Accepted" + + // PolicyReasonConflicted is used with the "Accepted" condition when the + // policy has not been accepted by a targeted resource because there is + // another policy that targets the same resource and a merge is not possible. + PolicyReasonConflicted PolicyConditionReason = "Conflicted" + + // PolicyReasonInvalid is used with the "Accepted" condition when the policy + // is syntactically or semantically invalid. + PolicyReasonInvalid PolicyConditionReason = "Invalid" + + // PolicyReasonTargetNotFound is used with the "Accepted" condition when the + // policy is attached to an invalid target resource. + PolicyReasonTargetNotFound PolicyConditionReason = "TargetNotFound" +) + +// PolicyAncestorStatus describes the status of a route with respect to an +// associated Ancestor. +// +// Ancestors refer to objects that are either the Target of a policy or above it +// in terms of object hierarchy. For example, if a policy targets a Service, the +// Policy's Ancestors are, in order, the Service, the HTTPRoute, the Gateway, and +// the GatewayClass. Almost always, in this hierarchy, the Gateway will be the most +// useful object to place Policy status on, so we recommend that implementations +// SHOULD use Gateway as the PolicyAncestorStatus object unless the designers +// have a _very_ good reason otherwise. +// +// In the context of policy attachment, the Ancestor is used to distinguish which +// resource results in a distinct application of this policy. For example, if a policy +// targets a Service, it may have a distinct result per attached Gateway. +// +// Policies targeting the same resource may have different effects depending on the +// ancestors of those resources. For example, different Gateways targeting the same +// Service may have different capabilities, especially if they have different underlying +// implementations. 
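A minimal sketch of a direct-attachment target using the reference types defined above, following the earlier import conventions; the Gateway name `prod-gateway` and listener `https` are illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

func main() {
	target := gatewayv1.LocalPolicyTargetReferenceWithSectionName{
		LocalPolicyTargetReference: gatewayv1.LocalPolicyTargetReference{
			Group: "gateway.networking.k8s.io",
			Kind:  "Gateway",
			Name:  "prod-gateway",
		},
		// Target a single listener; if "https" does not exist on the
		// Gateway, the policy must fail to attach, per the rules above.
		SectionName: ptr.To(gatewayv1.SectionName("https")),
	}
	fmt.Printf("target %s/%s section=%s\n", target.Kind, target.Name, *target.SectionName)
}
```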
+// +// For example, in BackendTLSPolicy, the Policy attaches to a Service that is +// used as a backend in a HTTPRoute that is itself attached to a Gateway. +// In this case, the relevant object for status is the Gateway, and that is the +// ancestor object referred to in this status. +// +// Note that a parent is also an ancestor, so for objects where the parent is the +// relevant object for status, this struct SHOULD still be used. +// +// This struct is intended to be used in a slice that's effectively a map, +// with a composite key made up of the AncestorRef and the ControllerName. +type PolicyAncestorStatus struct { + // AncestorRef corresponds with a ParentRef in the spec that this + // PolicyAncestorStatus struct describes the status of. + // +required + AncestorRef ParentReference `json:"ancestorRef"` + + // ControllerName is a domain/path string that indicates the name of the + // controller that wrote this status. This corresponds with the + // controllerName field on GatewayClass. + // + // Example: "example.net/gateway-controller". + // + // The format of this field is DOMAIN "/" PATH, where DOMAIN and PATH are + // valid Kubernetes names + // (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + // + // Controllers MUST populate this field when writing status. Controllers should ensure that + // entries to status populated with their ControllerName are cleaned up when they are no + // longer necessary. + // +required + ControllerName GatewayController `json:"controllerName"` + + // Conditions describes the status of the Policy with respect to the given Ancestor. + // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // + // +required + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// PolicyStatus defines the common attributes that all Policies should include within +// their status. 
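To illustrate the merge-by-type rule for Conditions described above, a sketch using apimachinery's condition helper, which updates an existing Condition of the same Type rather than appending a duplicate. The controller name, ancestor, and generation values are illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

func main() {
	st := gatewayv1.PolicyAncestorStatus{
		AncestorRef:    gatewayv1.ParentReference{Name: "prod-gateway"},
		ControllerName: "example.net/gateway-controller",
	}
	// SetStatusCondition merges by Type instead of appending a duplicate
	// Condition; ObservedGeneration carries the generation seen when the
	// update was computed, as the rules above require.
	meta.SetStatusCondition(&st.Conditions, metav1.Condition{
		Type:               string(gatewayv1.PolicyConditionAccepted),
		Status:             metav1.ConditionTrue,
		Reason:             string(gatewayv1.PolicyReasonAccepted),
		Message:            "policy accepted",
		ObservedGeneration: 3,
	})
	fmt.Println("conditions:", len(st.Conditions))
}
```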
+type PolicyStatus struct { + // Ancestors is a list of ancestor resources (usually Gateways) that are + // associated with the policy, and the status of the policy with respect to + // each ancestor. When this policy attaches to a parent, the controller that + // manages the parent and the ancestors MUST add an entry to this list when + // the controller first sees the policy and SHOULD update the entry as + // appropriate when the relevant ancestor is modified. + // + // Note that choosing the relevant ancestor is left to the Policy designers; + // an important part of Policy design is designing the right object level at + // which to namespace this status. + // + // Note also that implementations MUST ONLY populate ancestor status for + // the Ancestor resources they are responsible for. Implementations MUST + // use the ControllerName field to uniquely identify the entries in this list + // that they are responsible for. + // + // Note that to achieve this, the list of PolicyAncestorStatus structs + // MUST be treated as a map with a composite key, made up of the AncestorRef + // and ControllerName fields combined. + // + // A maximum of 16 ancestors will be represented in this list. An empty list + // means the Policy is not relevant for any ancestors. + // + // If this slice is full, implementations MUST NOT add further entries. + // Instead they MUST consider the policy unimplementable and signal that + // on any related resources such as the ancestor that would be referenced + // here. For example, if this list was full on BackendTLSPolicy, no + // additional Gateways would be able to reference the Service targeted by + // the BackendTLSPolicy. + // + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + Ancestors []PolicyAncestorStatus `json:"ancestors"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go index bed2cc8b8..eb8806837 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go @@ -86,6 +86,7 @@ type ParentReference struct { // Name is the name of the referent. // // Support: Core + // +required Name ObjectName `json:"name"` // SectionName is the name of a section within the target resource. In the @@ -148,9 +149,31 @@ type ParentReference struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 Port *PortNumber `json:"port,omitempty"` } +// GatewayDefaultScope defines the set of default scopes that a Gateway +// can claim, for use in any Route type. At present the only supported +// scopes are "All" and "None". "None" is a special scope which +// explicitly means that the Route MUST NOT be attached to any default +// Gateway. +// +// +kubebuilder:validation:Enum=All;None +type GatewayDefaultScope string + +const ( + // GatewayDefaultScopeAll indicates that a Gateway can claim absolutely + // any Route asking for a default Gateway. + GatewayDefaultScopeAll GatewayDefaultScope = "All" + + // GatewayDefaultScopeNone indicates that a Gateway MUST NOT claim + // any Route asking for a default Gateway. + GatewayDefaultScopeNone GatewayDefaultScope = "None" +) + // CommonRouteSpec defines the common attributes that all Routes MUST include // within their spec.
type CommonRouteSpec struct { @@ -218,19 +241,34 @@ type CommonRouteSpec struct { // // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=32 // // // // ParentRefs []ParentReference `json:"parentRefs,omitempty"` + + // UseDefaultGateways indicates the default Gateway scope to use for this + // Route. If unset (the default) or set to None, the Route will not be + // attached to any default Gateway; if set, it will be attached to any + // default Gateway supporting the named scope, subject to the usual rules + // about which Routes a Gateway is allowed to claim. + // + // Think carefully before using this functionality! The set of default + // Gateways supporting the requested scope can change over time without + // any notice to the Route author, and in many situations it will not be + // appropriate to request a default Gateway for a given Route -- for + // example, a Route with specific security requirements should almost + // certainly not use a default Gateway. + // + // +optional + // + UseDefaultGateways GatewayDefaultScope `json:"useDefaultGateways,omitempty"` } // PortNumber defines a network port. -// -// +kubebuilder:validation:Minimum=1 -// +kubebuilder:validation:Maximum=65535 -type PortNumber int32 +type PortNumber = int32 // BackendRef defines how a Route should forward a request to a Kubernetes // resource. @@ -436,6 +474,7 @@ const ( type RouteParentStatus struct { // ParentRef corresponds with a ParentRef in the spec that this // RouteParentStatus struct describes the status of. + // +required ParentRef ParentReference `json:"parentRef"` // ControllerName is a domain/path string that indicates the name of the @@ -451,6 +490,7 @@ type RouteParentStatus struct { // Controllers MUST populate this field when writing status. Controllers should ensure that // entries to status populated with their ControllerName are cleaned up when they are no // longer necessary. + // +required ControllerName GatewayController `json:"controllerName"` // Conditions describes the status of the route with respect to the Gateway. @@ -469,14 +509,45 @@ type RouteParentStatus struct { // There are a number of cases where the "Accepted" condition may not be set // due to lack of controller visibility, that includes when: // - // * The Route refers to a non-existent parent. + // * The Route refers to a nonexistent parent. // * The Route is of a type that the controller does not support. // * The Route is in a namespace the controller does not have access to. // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. 
+ // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // // +listType=map // +listMapKey=type // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=8 + // +required Conditions []metav1.Condition `json:"conditions,omitempty"` } @@ -498,6 +569,31 @@ type RouteStatus struct { // A maximum of 32 Gateways will be represented in this list. An empty list // means the route has not been attached to any Gateway. // + // + // Notes for implementors: + // + // While parents is not a listType `map`, this is due to the fact that the + // list key is not scalar, and Kubernetes is unable to represent this. + // + // Parent status MUST be considered to be namespaced by the combination of + // the parentRef and controllerName fields, and implementations should keep + // the following rules in mind when updating this status: + // + // * Implementations MUST update only entries that have a matching value of + // `controllerName` for that implementation. + // * Implementations MUST NOT update entries with non-matching `controllerName` + // fields. + // * Implementations MUST treat each `parentRef` in the Route separately and + // update its status based on the relationship with that parent. + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // + // + // + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=32 Parents []RouteParentStatus `json:"parents"` } @@ -535,6 +631,30 @@ type Hostname string // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` type PreciseHostname string +// AbsoluteURI represents a Uniform Resource Identifier (URI) as defined by RFC3986. +// +// The AbsoluteURI MUST NOT be a relative URI, and it MUST follow the URI syntax and +// encoding rules specified in RFC3986. The AbsoluteURI MUST include both a +// scheme (e.g., "http" or "spiffe") and a scheme-specific-part. URIs that +// include an authority MUST include a fully qualified domain name or +// IP address as the host. +// The below regex is taken from the regex section in RFC 3986 with a slight modification to enforce a full URI and not a relative one. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^(([^:/?#]+):)(//([^/?#]*))([^?#]*)(\?([^#]*))?(#(.*))?` +type AbsoluteURI string + +// The CORSOrigin MUST NOT be a relative URI, and it MUST follow the URI syntax and +// encoding rules specified in RFC3986. The CORSOrigin MUST include both a +// scheme (e.g., "http" or "spiffe") and a scheme-specific-part, or it should be a single '*' character. +// URIs that include an authority MUST include a fully qualified domain name or +// IP address as the host.
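Since the `AbsoluteURI` pattern above is taken almost verbatim from RFC 3986's regex appendix, it can be sanity-checked directly. A quick sketch matching a few values against the same expression; the sample URIs are illustrative.

```go
package main

import (
	"fmt"
	"regexp"
)

// absoluteURIRe mirrors the kubebuilder Pattern on AbsoluteURI above:
// a scheme, an authority introduced by "//", then path/query/fragment.
var absoluteURIRe = regexp.MustCompile(`^(([^:/?#]+):)(//([^/?#]*))([^?#]*)(\?([^#]*))?(#(.*))?`)

func main() {
	for _, uri := range []string{
		"https://example.com/path",
		"spiffe://trust.domain/ns/default",
		"/relative/path", // rejected: no scheme or authority
	} {
		fmt.Printf("%-35s matches=%v\n", uri, absoluteURIRe.MatchString(uri))
	}
}
```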
+// The regex below asserts the form `scheme://host(:port)`, with the port being optional. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`(^\*$)|(^([a-zA-Z][a-zA-Z0-9+\-.]+):\/\/([^:/?#]+)(:([0-9]{1,5}))?$)` +type CORSOrigin string + // Group refers to a Kubernetes Group. It must either be an empty string or a // RFC 1123 subdomain. // @@ -662,11 +782,11 @@ type GatewayController string // Invalid values include: // // * example~ - "~" is an invalid character -// * example.com. - can not start or end with "." +// * example.com. - cannot start or end with "." // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 -// +kubebuilder:validation:Pattern=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]/?)*$` +// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$` type AnnotationKey string // AnnotationValue is the value of an annotation in Gateway API. This is used @@ -678,6 +798,45 @@ type AnnotationKey string // +kubebuilder:validation:MaxLength=4096 type AnnotationValue string +// LabelKey is the key of a label in the Gateway API. This is used for validation +// of maps such as Gateway infrastructure labels. This matches the Kubernetes +// "qualified name" validation that is used for labels. +// +// Valid values include: +// +// * example +// * example.com +// * example.com/path +// * example.com/path.html +// +// Invalid values include: +// +// * example~ - "~" is an invalid character +// * example.com. - cannot start or end with "." +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$` +type LabelKey string + +// LabelValue is the value of a label in the Gateway API. This is used for validation +// of maps such as Gateway infrastructure labels. This matches the Kubernetes +// label validation rules: +// * must be 63 characters or less (can be empty), +// * unless empty, must begin and end with an alphanumeric character ([a-z0-9A-Z]), +// * may contain dashes (-), underscores (_), dots (.), and alphanumerics between. +// +// Valid values include: +// +// * MyValue +// * my.name +// * 123-my-value +// +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=63 +// +kubebuilder:validation:Pattern=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$` +type LabelValue string + // AddressType defines how a network address is represented as a text string. // This may take two possible forms: // @@ -719,7 +878,7 @@ const ( // (see [RFC 5952](https://tools.ietf.org/html/rfc5952)). // // This type is intended for specific addresses. Address ranges are not - // supported (e.g. you can not use a CIDR range like 127.0.0.0/24 as an + // supported (e.g. you cannot use a CIDR range like 127.0.0.0/24 as an // IPAddress). // // Support: Extended @@ -745,7 +904,7 @@ const ( ) // SessionPersistence defines the desired state of SessionPersistence.
-// +kubebuilder:validation:XValidation:message="AbsoluteTimeout must be specified when cookie lifetimeType is Permanent",rule="!has(self.cookieConfig.lifetimeType) || self.cookieConfig.lifetimeType != 'Permanent' || has(self.absoluteTimeout)" +// +kubebuilder:validation:XValidation:message="AbsoluteTimeout must be specified when cookie lifetimeType is Permanent",rule="!has(self.cookieConfig) || !has(self.cookieConfig.lifetimeType) || self.cookieConfig.lifetimeType != 'Permanent' || has(self.absoluteTimeout)" type SessionPersistence struct { // SessionName defines the name of the persistent session token // which may be reflected in the cookie or the header. Users @@ -830,6 +989,8 @@ type CookieConfig struct { // absolute lifetime of the cookie tracked by the gateway and // is optional. // + // Defaults to "Session". + // // Support: Core for "Session" type // // Support: Extended for "Permanent" type @@ -855,3 +1016,15 @@ const ( // Support: Extended PermanentCookieLifetimeType CookieLifetimeType = "Permanent" ) + +// +kubebuilder:validation:XValidation:message="numerator must be less than or equal to denominator",rule="self.numerator <= self.denominator" +type Fraction struct { + // +kubebuilder:validation:Minimum=0 + // +required + Numerator int32 `json:"numerator"` + + // +optional + // +kubebuilder:default=100 + // +kubebuilder:validation:Minimum=1 + Denominator *int32 `json:"denominator,omitempty"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go index ddb9bb9d4..174c29270 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go @@ -25,6 +25,26 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedListeners) DeepCopyInto(out *AllowedListeners) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(ListenerNamespaces) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedListeners. +func (in *AllowedListeners) DeepCopy() *AllowedListeners { + if in == nil { + return nil + } + out := new(AllowedListeners) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedRoutes) DeepCopyInto(out *AllowedRoutes) { *out = *in @@ -108,6 +128,125 @@ func (in *BackendRef) DeepCopy() *BackendRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicy) DeepCopyInto(out *BackendTLSPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicy. +func (in *BackendTLSPolicy) DeepCopy() *BackendTLSPolicy { + if in == nil { + return nil + } + out := new(BackendTLSPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BackendTLSPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicyList) DeepCopyInto(out *BackendTLSPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackendTLSPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicyList. +func (in *BackendTLSPolicyList) DeepCopy() *BackendTLSPolicyList { + if in == nil { + return nil + } + out := new(BackendTLSPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendTLSPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicySpec) DeepCopyInto(out *BackendTLSPolicySpec) { + *out = *in + if in.TargetRefs != nil { + in, out := &in.TargetRefs, &out.TargetRefs + *out = make([]LocalPolicyTargetReferenceWithSectionName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Validation.DeepCopyInto(&out.Validation) + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicySpec. +func (in *BackendTLSPolicySpec) DeepCopy() *BackendTLSPolicySpec { + if in == nil { + return nil + } + out := new(BackendTLSPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicyValidation) DeepCopyInto(out *BackendTLSPolicyValidation) { + *out = *in + if in.CACertificateRefs != nil { + in, out := &in.CACertificateRefs, &out.CACertificateRefs + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.WellKnownCACertificates != nil { + in, out := &in.WellKnownCACertificates, &out.WellKnownCACertificates + *out = new(WellKnownCACertificatesType) + **out = **in + } + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]SubjectAltName, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicyValidation. +func (in *BackendTLSPolicyValidation) DeepCopy() *BackendTLSPolicyValidation { + if in == nil { + return nil + } + out := new(BackendTLSPolicyValidation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CommonRouteSpec) DeepCopyInto(out *CommonRouteSpec) { *out = *in @@ -150,6 +289,64 @@ func (in *CookieConfig) DeepCopy() *CookieConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForwardBodyConfig) DeepCopyInto(out *ForwardBodyConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardBodyConfig. +func (in *ForwardBodyConfig) DeepCopy() *ForwardBodyConfig { + if in == nil { + return nil + } + out := new(ForwardBodyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fraction) DeepCopyInto(out *Fraction) { + *out = *in + if in.Denominator != nil { + in, out := &in.Denominator, &out.Denominator + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fraction. +func (in *Fraction) DeepCopy() *Fraction { + if in == nil { + return nil + } + out := new(Fraction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendTLSConfig) DeepCopyInto(out *FrontendTLSConfig) { + *out = *in + in.Default.DeepCopyInto(&out.Default) + if in.PerPort != nil { + in, out := &in.PerPort, &out.PerPort + *out = make([]TLSPortConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendTLSConfig. +func (in *FrontendTLSConfig) DeepCopy() *FrontendTLSConfig { + if in == nil { + return nil + } + out := new(FrontendTLSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FrontendTLSValidation) DeepCopyInto(out *FrontendTLSValidation) { *out = *in @@ -172,6 +369,26 @@ func (in *FrontendTLSValidation) DeepCopy() *FrontendTLSValidation { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCAuthConfig) DeepCopyInto(out *GRPCAuthConfig) { + *out = *in + if in.AllowedRequestHeaders != nil { + in, out := &in.AllowedRequestHeaders, &out.AllowedRequestHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAuthConfig. +func (in *GRPCAuthConfig) DeepCopy() *GRPCAuthConfig { + if in == nil { + return nil + } + out := new(GRPCAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GRPCBackendRef) DeepCopyInto(out *GRPCBackendRef) { *out = *in @@ -200,7 +417,7 @@ func (in *GRPCHeaderMatch) DeepCopyInto(out *GRPCHeaderMatch) { *out = *in if in.Type != nil { in, out := &in.Type, &out.Type - *out = new(HeaderMatchType) + *out = new(GRPCHeaderMatchType) **out = **in } } @@ -369,6 +586,11 @@ func (in *GRPCRouteMatch) DeepCopy() *GRPCRouteMatch { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GRPCRouteRule) DeepCopyInto(out *GRPCRouteRule) { *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(SectionName) + **out = **in + } if in.Matches != nil { in, out := &in.Matches, &out.Matches *out = make([]GRPCRouteMatch, len(*in)) @@ -479,21 +701,21 @@ func (in *Gateway) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GatewayAddress) DeepCopyInto(out *GatewayAddress) { +func (in *GatewayBackendTLS) DeepCopyInto(out *GatewayBackendTLS) { *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(AddressType) - **out = **in + if in.ClientCertificateRef != nil { + in, out := &in.ClientCertificateRef, &out.ClientCertificateRef + *out = new(SecretObjectReference) + (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayAddress. -func (in *GatewayAddress) DeepCopy() *GatewayAddress { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayBackendTLS. +func (in *GatewayBackendTLS) DeepCopy() *GatewayBackendTLS { if in == nil { return nil } - out := new(GatewayAddress) + out := new(GatewayBackendTLS) in.DeepCopyInto(out) return out } @@ -614,7 +836,7 @@ func (in *GatewayInfrastructure) DeepCopyInto(out *GatewayInfrastructure) { *out = *in if in.Labels != nil { in, out := &in.Labels, &out.Labels - *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + *out = make(map[LabelKey]LabelValue, len(*in)) for key, val := range *in { (*out)[key] = val } @@ -687,7 +909,7 @@ func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]GatewayAddress, len(*in)) + *out = make([]GatewaySpecAddress, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -697,6 +919,16 @@ func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { *out = new(GatewayInfrastructure) (*in).DeepCopyInto(*out) } + if in.AllowedListeners != nil { + in, out := &in.AllowedListeners, &out.AllowedListeners + *out = new(AllowedListeners) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(GatewayTLSConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. @@ -709,6 +941,26 @@ func (in *GatewaySpec) DeepCopy() *GatewaySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpecAddress) DeepCopyInto(out *GatewaySpecAddress) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(AddressType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpecAddress. +func (in *GatewaySpecAddress) DeepCopy() *GatewaySpecAddress { + if in == nil { + return nil + } + out := new(GatewaySpecAddress) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { *out = *in @@ -768,29 +1020,15 @@ func (in *GatewayStatusAddress) DeepCopy() *GatewayStatusAddress { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GatewayTLSConfig) DeepCopyInto(out *GatewayTLSConfig) { *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(TLSModeType) - **out = **in - } - if in.CertificateRefs != nil { - in, out := &in.CertificateRefs, &out.CertificateRefs - *out = make([]SecretObjectReference, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FrontendValidation != nil { - in, out := &in.FrontendValidation, &out.FrontendValidation - *out = new(FrontendTLSValidation) + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(GatewayBackendTLS) (*in).DeepCopyInto(*out) } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[AnnotationKey]AnnotationValue, len(*in)) - for key, val := range *in { - (*out)[key] = val - } + if in.Frontend != nil { + in, out := &in.Frontend, &out.Frontend + *out = new(FrontendTLSConfig) + (*in).DeepCopyInto(*out) } } @@ -804,6 +1042,31 @@ func (in *GatewayTLSConfig) DeepCopy() *GatewayTLSConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPAuthConfig) DeepCopyInto(out *HTTPAuthConfig) { + *out = *in + if in.AllowedRequestHeaders != nil { + in, out := &in.AllowedRequestHeaders, &out.AllowedRequestHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedResponseHeaders != nil { + in, out := &in.AllowedResponseHeaders, &out.AllowedResponseHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPAuthConfig. +func (in *HTTPAuthConfig) DeepCopy() *HTTPAuthConfig { + if in == nil { + return nil + } + out := new(HTTPAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPBackendRef) DeepCopyInto(out *HTTPBackendRef) { *out = *in @@ -827,6 +1090,77 @@ func (in *HTTPBackendRef) DeepCopy() *HTTPBackendRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCORSFilter) DeepCopyInto(out *HTTPCORSFilter) { + *out = *in + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]CORSOrigin, len(*in)) + copy(*out, *in) + } + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]HTTPMethodWithWildcard, len(*in)) + copy(*out, *in) + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]HTTPHeaderName, len(*in)) + copy(*out, *in) + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]HTTPHeaderName, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCORSFilter. +func (in *HTTPCORSFilter) DeepCopy() *HTTPCORSFilter { + if in == nil { + return nil + } + out := new(HTTPCORSFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPExternalAuthFilter) DeepCopyInto(out *HTTPExternalAuthFilter) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.GRPCAuthConfig != nil { + in, out := &in.GRPCAuthConfig, &out.GRPCAuthConfig + *out = new(GRPCAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPAuthConfig != nil { + in, out := &in.HTTPAuthConfig, &out.HTTPAuthConfig + *out = new(HTTPAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.ForwardBody != nil { + in, out := &in.ForwardBody, &out.ForwardBody + *out = new(ForwardBodyConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPExternalAuthFilter. +func (in *HTTPExternalAuthFilter) DeepCopy() *HTTPExternalAuthFilter { + if in == nil { + return nil + } + out := new(HTTPExternalAuthFilter) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { *out = *in @@ -966,6 +1300,16 @@ func (in *HTTPQueryParamMatch) DeepCopy() *HTTPQueryParamMatch { func (in *HTTPRequestMirrorFilter) DeepCopyInto(out *HTTPRequestMirrorFilter) { *out = *in in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.Percent != nil { + in, out := &in.Percent, &out.Percent + *out = new(int32) + **out = **in + } + if in.Fraction != nil { + in, out := &in.Fraction, &out.Fraction + *out = new(Fraction) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestMirrorFilter. @@ -1073,6 +1417,16 @@ func (in *HTTPRouteFilter) DeepCopyInto(out *HTTPRouteFilter) { *out = new(HTTPURLRewriteFilter) (*in).DeepCopyInto(*out) } + if in.CORS != nil { + in, out := &in.CORS, &out.CORS + *out = new(HTTPCORSFilter) + (*in).DeepCopyInto(*out) + } + if in.ExternalAuth != nil { + in, out := &in.ExternalAuth, &out.ExternalAuth + *out = new(HTTPExternalAuthFilter) + (*in).DeepCopyInto(*out) + } if in.ExtensionRef != nil { in, out := &in.ExtensionRef, &out.ExtensionRef *out = new(LocalObjectReference) @@ -1161,9 +1515,44 @@ func (in *HTTPRouteMatch) DeepCopy() *HTTPRouteMatch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteRetry) DeepCopyInto(out *HTTPRouteRetry) { + *out = *in + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]HTTPRouteRetryStatusCode, len(*in)) + copy(*out, *in) + } + if in.Attempts != nil { + in, out := &in.Attempts, &out.Attempts + *out = new(int) + **out = **in + } + if in.Backoff != nil { + in, out := &in.Backoff, &out.Backoff + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetry. +func (in *HTTPRouteRetry) DeepCopy() *HTTPRouteRetry { + if in == nil { + return nil + } + out := new(HTTPRouteRetry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(SectionName) + **out = **in + } if in.Matches != nil { in, out := &in.Matches, &out.Matches *out = make([]HTTPRouteMatch, len(*in)) @@ -1190,6 +1579,11 @@ func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { *out = new(HTTPRouteTimeouts) (*in).DeepCopyInto(*out) } + if in.Retry != nil { + in, out := &in.Retry, &out.Retry + *out = new(HTTPRouteRetry) + (*in).DeepCopyInto(*out) + } if in.SessionPersistence != nil { in, out := &in.SessionPersistence, &out.SessionPersistence *out = new(SessionPersistence) @@ -1311,7 +1705,7 @@ func (in *Listener) DeepCopyInto(out *Listener) { } if in.TLS != nil { in, out := &in.TLS, &out.TLS - *out = new(GatewayTLSConfig) + *out = new(ListenerTLSConfig) (*in).DeepCopyInto(*out) } if in.AllowedRoutes != nil { @@ -1331,6 +1725,31 @@ func (in *Listener) DeepCopy() *Listener { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerNamespaces) DeepCopyInto(out *ListenerNamespaces) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(FromNamespaces) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerNamespaces. +func (in *ListenerNamespaces) DeepCopy() *ListenerNamespaces { + if in == nil { + return nil + } + out := new(ListenerNamespaces) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListenerStatus) DeepCopyInto(out *ListenerStatus) { *out = *in @@ -1360,6 +1779,40 @@ func (in *ListenerStatus) DeepCopy() *ListenerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSConfig) DeepCopyInto(out *ListenerTLSConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(TLSModeType) + **out = **in + } + if in.CertificateRefs != nil { + in, out := &in.CertificateRefs, &out.CertificateRefs + *out = make([]SecretObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSConfig. +func (in *ListenerTLSConfig) DeepCopy() *ListenerTLSConfig { + if in == nil { + return nil + } + out := new(ListenerTLSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { *out = *in @@ -1390,6 +1843,62 @@ func (in *LocalParametersReference) DeepCopy() *LocalParametersReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalPolicyTargetReference) DeepCopyInto(out *LocalPolicyTargetReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalPolicyTargetReference. +func (in *LocalPolicyTargetReference) DeepCopy() *LocalPolicyTargetReference { + if in == nil { + return nil + } + out := new(LocalPolicyTargetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalPolicyTargetReferenceWithSectionName) DeepCopyInto(out *LocalPolicyTargetReferenceWithSectionName) { + *out = *in + out.LocalPolicyTargetReference = in.LocalPolicyTargetReference + if in.SectionName != nil { + in, out := &in.SectionName, &out.SectionName + *out = new(SectionName) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalPolicyTargetReferenceWithSectionName. +func (in *LocalPolicyTargetReferenceWithSectionName) DeepCopy() *LocalPolicyTargetReferenceWithSectionName { + if in == nil { + return nil + } + out := new(LocalPolicyTargetReferenceWithSectionName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespacedPolicyTargetReference) DeepCopyInto(out *NamespacedPolicyTargetReference) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedPolicyTargetReference. +func (in *NamespacedPolicyTargetReference) DeepCopy() *NamespacedPolicyTargetReference { + if in == nil { + return nil + } + out := new(NamespacedPolicyTargetReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { *out = *in @@ -1470,6 +1979,51 @@ func (in *ParentReference) DeepCopy() *ParentReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyAncestorStatus) DeepCopyInto(out *PolicyAncestorStatus) { + *out = *in + in.AncestorRef.DeepCopyInto(&out.AncestorRef) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAncestorStatus. +func (in *PolicyAncestorStatus) DeepCopy() *PolicyAncestorStatus { + if in == nil { + return nil + } + out := new(PolicyAncestorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + if in.Ancestors != nil { + in, out := &in.Ancestors, &out.Ancestors + *out = make([]PolicyAncestorStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. 
+func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteGroupKind) DeepCopyInto(out *RouteGroupKind) { *out = *in @@ -1629,3 +2183,69 @@ func (in *SessionPersistence) DeepCopy() *SessionPersistence { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAltName) DeepCopyInto(out *SubjectAltName) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAltName. +func (in *SubjectAltName) DeepCopy() *SubjectAltName { + if in == nil { + return nil + } + out := new(SubjectAltName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupportedFeature) DeepCopyInto(out *SupportedFeature) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupportedFeature. +func (in *SupportedFeature) DeepCopy() *SupportedFeature { + if in == nil { + return nil + } + out := new(SupportedFeature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(FrontendTLSValidation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSPortConfig) DeepCopyInto(out *TLSPortConfig) { + *out = *in + in.TLS.DeepCopyInto(&out.TLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSPortConfig. +func (in *TLSPortConfig) DeepCopy() *TLSPortConfig { + if in == nil { + return nil + } + out := new(TLSPortConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go index 9c8db216a..1f390588e 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go @@ -22,16 +22,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName specifies the group name used to register the objects. const GroupName = "gateway.networking.k8s.io" // GroupVersion specifies the group and the version used to register the objects. 
-var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1"} +var GroupVersion = metav1.GroupVersion{Group: GroupName, Version: "v1"} // SchemeGroupVersion is group version used to register these objects // Deprecated: use GroupVersion instead. @@ -61,6 +61,8 @@ func init() { // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &BackendTLSPolicy{}, + &BackendTLSPolicyList{}, &GRPCRoute{}, &GRPCRouteList{}, &Gateway{}, @@ -71,6 +73,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &HTTPRouteList{}, ) // AddToGroupVersion allows the serialization of client types like ListOptions. - v1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index d538ac119..3fe528bbf 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -52,8 +52,8 @@ import ( // - bool, for JSON booleans // - float64, for JSON numbers // - string, for JSON strings -// - []interface{}, for JSON arrays -// - map[string]interface{}, for JSON objects +// - []any, for JSON arrays +// - map[string]any, for JSON objects // - nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length @@ -117,9 +117,6 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // The input can be assumed to be a valid encoding of // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. -// -// By convention, to approximate the behavior of [Unmarshal] itself, -// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. 
type Unmarshaler interface { UnmarshalJSON([]byte) error } @@ -132,7 +129,7 @@ type UnmarshalTypeError struct { Type reflect.Type // type of Go value it could not be assigned to Offset int64 // error occurred after reading Offset bytes Struct string // name of the struct type containing the field - Field string // the full path from root node to the field + Field string // the full path from root node to the field, include embedded struct } func (e *UnmarshalTypeError) Error() string { @@ -281,7 +278,11 @@ func (d *decodeState) addErrorContext(err error) error { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() - err.Field = strings.Join(d.errorContext.FieldStack, ".") + fieldStack := d.errorContext.FieldStack + if err.Field != "" { + fieldStack = append(fieldStack, err.Field) + } + err.Field = strings.Join(fieldStack, ".") } } return err @@ -492,9 +493,9 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm } // Prevent infinite loop if v is an interface pointing to its own address: - // var v interface{} + // var v any // v = &v - if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) { v = v.Elem() break } @@ -784,7 +785,10 @@ func (d *decodeState) object(v reflect.Value) error { } subv = v destring = f.quoted - for _, i := range f.index { + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + for i, ind := range f.index { if subv.Kind() == reflect.Pointer { if subv.IsNil() { // If a struct embeds a pointer to an unexported type, @@ -804,13 +808,16 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Elem() } - subv = subv.Field(i) - } - if d.errorContext == nil { - d.errorContext = new(errorContext) + if i < len(f.index)-1 { + d.errorContext.FieldStack = append( + d.errorContext.FieldStack, + subv.Type().Field(ind).Name, + ) + } + subv = subv.Field(ind) } - d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.appendStrictFieldStackKey(f.name) } else if d.disallowUnknownFields { d.saveStrictError(d.newFieldError(unknownStrictErrType, string(key))) @@ -1118,7 +1125,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool // in an empty interface. They are not strictly necessary, // but they avoid the weight of reflection in this common case. -// valueInterface is like value but returns interface{} +// valueInterface is like value but returns any. func (d *decodeState) valueInterface() (val any) { switch d.opcode { default: @@ -1135,7 +1142,7 @@ func (d *decodeState) valueInterface() (val any) { return } -// arrayInterface is like array but returns []interface{}. +// arrayInterface is like array but returns []any. func (d *decodeState) arrayInterface() []any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { @@ -1170,7 +1177,7 @@ func (d *decodeState) arrayInterface() []any { return v } -// objectInterface is like object but returns map[string]interface{}. +// objectInterface is like object but returns map[string]any. 
func (d *decodeState) objectInterface() map[string]any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index eb73bff58..4e3a1a2f1 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -71,8 +71,8 @@ import ( // // The "omitempty" option specifies that the field should be omitted // from the encoding if the field has an empty value, defined as -// false, 0, a nil pointer, a nil interface value, and any empty array, -// slice, map, or string. +// false, 0, a nil pointer, a nil interface value, and any array, +// slice, map, or string of length zero. // // As a special case, if the field tag is "-", the field is always omitted. // Note that a field with name "-" can still be generated using the tag "-,". @@ -98,6 +98,17 @@ import ( // // Field appears in JSON as key "-". // Field int `json:"-,"` // +// The "omitzero" option specifies that the field should be omitted +// from the encoding if the field has a zero value, according to rules: +// +// 1) If the field type has an "IsZero() bool" method, that will be used to +// determine whether the value is zero. +// +// 2) Otherwise, the value is zero if it is the zero value for its type. +// +// If both "omitempty" and "omitzero" are specified, the field will be omitted +// if the value is either empty or zero (or both). +// // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used @@ -690,7 +701,8 @@ FieldLoop: fv = fv.Field(i) } - if f.omitEmpty && isEmptyValue(fv) { + if (f.omitEmpty && isEmptyValue(fv)) || + (f.omitZero && (f.isZero == nil && fv.IsZero() || (f.isZero != nil && f.isZero(fv)))) { continue } e.WriteByte(next) @@ -808,7 +820,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { - ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe + ptr any // always an unsafe.Pointer, but avoids a dependency on package unsafe len int }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { @@ -1039,11 +1051,19 @@ type field struct { index []int typ reflect.Type omitEmpty bool + omitZero bool + isZero func(reflect.Value) bool quoted bool encoder encoderFunc } +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeFor[isZeroer]() + // typeFields returns a list of fields that JSON should recognize for the given type. // The algorithm is breadth-first search over the set of structs to include - the top struct // and then any reachable anonymous structs. @@ -1135,6 +1155,7 @@ func typeFields(t reflect.Type) structFields { index: index, typ: ft, omitEmpty: opts.Contains("omitempty"), + omitZero: opts.Contains("omitzero"), quoted: quoted, } field.nameBytes = []byte(field.name) @@ -1144,6 +1165,40 @@ func typeFields(t reflect.Type) structFields { field.nameEscHTML = `"` + string(nameEscBuf) + `":` field.nameNonEsc = `"` + field.name + `":` + if field.omitZero { + t := sf.Type + // Provide a function that uses a type's IsZero method. 
+ switch { + case t.Kind() == reflect.Interface && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return v.IsNil() || + (v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) || + v.Interface().(isZeroer).IsZero() + } + case t.Kind() == reflect.Pointer && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on nil pointer. + return v.IsNil() || v.Interface().(isZeroer).IsZero() + } + case t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + return v.Interface().(isZeroer).IsZero() + } + case reflect.PointerTo(t).Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + if !v.CanAddr() { + // Temporarily box v so we can take the address. + v2 := reflect.New(v.Type()).Elem() + v2.Set(v) + v = v2 + } + return v.Addr().Interface().(isZeroer).IsZero() + } + } + } + fields = append(fields, field) if count[f.typ] > 1 { // If there were multiple instances, add a second, diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 48fc4d945..cc2108b92 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -31,8 +31,8 @@ func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: r} } -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// [Number] instead of as a float64. +// UseNumber causes the Decoder to unmarshal a number into an +// interface value as a [Number] instead of as a float64. func (dec *Decoder) UseNumber() { dec.d.useNumber = true } // DisallowUnknownFields causes the Decoder to return an error when the destination